asda?‰PNG  IHDR ? f ??C1 sRGB ??é gAMA ±? üa pHYs ? ??o¨d GIDATx^íüL”÷e÷Y?a?("Bh?_ò???¢§?q5k?*:t0A-o??¥]VkJ¢M??f?±8\k2íll£1]q?ù???T PKtge[packaging/_structures.pynu[# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function class Infinity(object): def __repr__(self): return "Infinity" def __hash__(self): return hash(repr(self)) def __lt__(self, other): return False def __le__(self, other): return False def __eq__(self, other): return isinstance(other, self.__class__) def __ne__(self, other): return not isinstance(other, self.__class__) def __gt__(self, other): return True def __ge__(self, other): return True def __neg__(self): return NegativeInfinity Infinity = Infinity() class NegativeInfinity(object): def __repr__(self): return "-Infinity" def __hash__(self): return hash(repr(self)) def __lt__(self, other): return True def __le__(self, other): return True def __eq__(self, other): return isinstance(other, self.__class__) def __ne__(self, other): return not isinstance(other, self.__class__) def __gt__(self, other): return False def __ge__(self, other): return False def __neg__(self): return Infinity NegativeInfinity = NegativeInfinity() PKtge[<)Xpackaging/__about__.pynu[# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
from __future__ import absolute_import, division, print_function __all__ = [ "__title__", "__summary__", "__uri__", "__version__", "__author__", "__email__", "__license__", "__copyright__", ] __title__ = "packaging" __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" __version__ = "16.8" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" __license__ = "BSD or Apache License, Version 2.0" __copyright__ = "Copyright 2014-2016 %s" % __author__ PKtge[ơ$-$-packaging/version.pynu[# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function import collections import itertools import re from ._structures import Infinity __all__ = [ "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN" ] _Version = collections.namedtuple( "_Version", ["epoch", "release", "dev", "pre", "post", "local"], ) def parse(version): """ Parse the given version string and return either a :class:`Version` object or a :class:`LegacyVersion` object depending on if the given version is a valid PEP 440 version or a legacy version. """ try: return Version(version) except InvalidVersion: return LegacyVersion(version) class InvalidVersion(ValueError): """ An invalid version was found, users should refer to PEP 440. 
""" class _BaseVersion(object): def __hash__(self): return hash(self._key) def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) def _compare(self, other, method): if not isinstance(other, _BaseVersion): return NotImplemented return method(self._key, other._key) class LegacyVersion(_BaseVersion): def __init__(self, version): self._version = str(version) self._key = _legacy_cmpkey(self._version) def __str__(self): return self._version def __repr__(self): return "".format(repr(str(self))) @property def public(self): return self._version @property def base_version(self): return self._version @property def local(self): return None @property def is_prerelease(self): return False @property def is_postrelease(self): return False _legacy_version_component_re = re.compile( r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, ) _legacy_version_replacement_map = { "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", } def _parse_version_parts(s): for part in _legacy_version_component_re.split(s): part = _legacy_version_replacement_map.get(part, part) if not part or part == ".": continue if part[:1] in "0123456789": # pad for numeric comparison yield part.zfill(8) else: yield "*" + part # ensure that alpha/beta/candidate are before final yield "*final" def _legacy_cmpkey(version): # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch # greater than or equal to 0. This will effectively put the LegacyVersion, # which uses the defacto standard originally implemented by setuptools, # as before all PEP 440 versions. 
epoch = -1 # This scheme is taken from pkg_resources.parse_version setuptools prior to # it's adoption of the packaging library. parts = [] for part in _parse_version_parts(version.lower()): if part.startswith("*"): # remove "-" before a prerelease tag if part < "*final": while parts and parts[-1] == "*final-": parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1] == "00000000": parts.pop() parts.append(part) parts = tuple(parts) return epoch, parts # Deliberately not anchored to the start and end of the string, to make it # easier for 3rd party code to reuse VERSION_PATTERN = r""" v? (?: (?:(?P[0-9]+)!)? # epoch (?P[0-9]+(?:\.[0-9]+)*) # release segment (?P
                                          # pre-release
            [-_\.]?
            (?P(a|b|c|rc|alpha|beta|pre|preview))
            [-_\.]?
            (?P[0-9]+)?
        )?
        (?P                                         # post release
            (?:-(?P[0-9]+))
            |
            (?:
                [-_\.]?
                (?Ppost|rev|r)
                [-_\.]?
                (?P[0-9]+)?
            )
        )?
        (?P                                          # dev release
            [-_\.]?
            (?Pdev)
            [-_\.]?
            (?P[0-9]+)?
        )?
    )
    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
"""


class Version(_BaseVersion):

    _regex = re.compile(
        r"^\s*" + VERSION_PATTERN + r"\s*$",
        re.VERBOSE | re.IGNORECASE,
    )

    def __init__(self, version):
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion("Invalid version: '{0}'".format(version))

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(
                match.group("pre_l"),
                match.group("pre_n"),
            ),
            post=_parse_letter_version(
                match.group("post_l"),
                match.group("post_n1") or match.group("post_n2"),
            ),
            dev=_parse_letter_version(
                match.group("dev_l"),
                match.group("dev_n"),
            ),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        return "".format(repr(str(self)))

    def __str__(self):
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        # Pre-release
        if self._version.pre is not None:
            parts.append("".join(str(x) for x in self._version.pre))

        # Post-release
        if self._version.post is not None:
            parts.append(".post{0}".format(self._version.post[1]))

        # Development release
        if self._version.dev is not None:
            parts.append(".dev{0}".format(self._version.dev[1]))

        # Local version segment
        if self._version.local is not None:
            parts.append(
                "+{0}".format(".".join(str(x) for x in self._version.local))
            )

        return "".join(parts)

    @property
    def public(self):
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        return "".join(parts)

    @property
    def local(self):
        version_string = str(self)
        if "+" in version_string:
            return version_string.split("+", 1)[1]

    @property
    def is_prerelease(self):
        return bool(self._version.dev or self._version.pre)

    @property
    def is_postrelease(self):
        return bool(self._version.post)


def _parse_letter_version(letter, number):
    if letter:
        # We consider there to be an implicit 0 in a pre-release if there is
        # not a numeral associated with it.
        if number is None:
            number = 0

        # We normalize any letters to their lower case form
        letter = letter.lower()

        # We consider some words to be alternate spellings of other words and
        # in those cases we want to normalize the spellings to our preferred
        # spelling.
        if letter == "alpha":
            letter = "a"
        elif letter == "beta":
            letter = "b"
        elif letter in ["c", "pre", "preview"]:
            letter = "rc"
        elif letter in ["rev", "r"]:
            letter = "post"

        return letter, int(number)
    if not letter and number:
        # We assume if we are given a number, but we are not given a letter
        # then this is using the implicit post release syntax (e.g. 1.0-1)
        letter = "post"

        return letter, int(number)


_local_version_seperators = re.compile(r"[\._-]")


def _parse_local_version(local):
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is not None:
        return tuple(
            part.lower() if not part.isdigit() else int(part)
            for part in _local_version_seperators.split(local)
        )


def _cmpkey(epoch, release, pre, post, dev, local):
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll use a reverse the list, drop all the now
    # leading zeros until we come to something non zero, then take the rest
    # re-reverse it back into the correct order and make it a tuple and use
    # that for our sorting key.
    release = tuple(
        reversed(list(
            itertools.dropwhile(
                lambda x: x == 0,
                reversed(release),
            )
        ))
    )

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre segment, but we _only_ want to do this
    # if there is not a pre or a post segment. If we have one of those then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        pre = -Infinity
    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        pre = Infinity

    # Versions without a post segment should sort before those with one.
    if post is None:
        post = -Infinity

    # Versions without a development segment should sort after those with one.
    if dev is None:
        dev = Infinity

    if local is None:
        # Versions without a local segment should sort before those with one.
        local = -Infinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alpha numeric segments sort before numeric segments
        # - Alpha numeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        local = tuple(
            (i, "") if isinstance(i, int) else (-Infinity, i)
            for i in local
        )

    return epoch, release, pre, post, dev, local
PKtge[iJ\\packaging/_compat.pynu[# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import sys


PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

# flake8: noqa

if PY3:
    string_types = str,
else:
    string_types = basestring,


def with_metaclass(meta, *bases):
    """
    Create a base class with a metaclass.
    """
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})
PKtge[packaging/requirements.pynu[# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import string
import re

from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from pkg_resources.extern.pyparsing import Literal as L  # noqa
from pkg_resources.extern.six.moves.urllib import parse as urlparse

from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet


class InvalidRequirement(ValueError):
    """
    An invalid requirement was found, users should refer to PEP 508.
    """


ALPHANUM = Word(string.ascii_letters + string.digits)

LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()

PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))

NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER

URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)

EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")

VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)

VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
                       joinString=",", adjacent=False)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')

VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])

MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPERATOR = SEMICOLON
MARKER = MARKER_SEPERATOR + MARKER_EXPR

VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)

NAMED_REQUIREMENT = \
    NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)

REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd


class Requirement(object):
    """Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """

    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?

    def __init__(self, requirement_string):
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            raise InvalidRequirement(
                "Invalid requirement, parse error at \"{0!r}\"".format(
                    requirement_string[e.loc:e.loc + 8]))

        self.name = req.name
        if req.url:
            parsed_url = urlparse.urlparse(req.url)
            if not (parsed_url.scheme and parsed_url.netloc) or (
                    not parsed_url.scheme and not parsed_url.netloc):
                raise InvalidRequirement("Invalid URL given")
            self.url = req.url
        else:
            self.url = None
        self.extras = set(req.extras.asList() if req.extras else [])
        self.specifier = SpecifierSet(req.specifier)
        self.marker = req.marker if req.marker else None

    def __str__(self):
        parts = [self.name]

        if self.extras:
            parts.append("[{0}]".format(",".join(sorted(self.extras))))

        if self.specifier:
            parts.append(str(self.specifier))

        if self.url:
            parts.append("@ {0}".format(self.url))

        if self.marker:
            parts.append("; {0}".format(self.marker))

        return "".join(parts)

    def __repr__(self):
        return "".format(str(self))
PKtge[|Eymympackaging/specifiers.pynu[# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import abc
import functools
import itertools
import re

from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse


class InvalidSpecifier(ValueError):
    """
    An invalid specifier was found, users should refer to PEP 440.
    """


class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):

    @abc.abstractmethod
    def __str__(self):
        """
        Returns the str representation of this Specifier like object. This
        should be representative of the Specifier itself.
        """

    @abc.abstractmethod
    def __hash__(self):
        """
        Returns a hash value for this Specifier like object.
        """

    @abc.abstractmethod
    def __eq__(self, other):
        """
        Returns a boolean representing whether or not the two Specifier like
        objects are equal.
        """

    @abc.abstractmethod
    def __ne__(self, other):
        """
        Returns a boolean representing whether or not the two Specifier like
        objects are not equal.
        """

    @abc.abstractproperty
    def prereleases(self):
        """
        Returns whether or not pre-releases as a whole are allowed by this
        specifier.
        """

    @prereleases.setter
    def prereleases(self, value):
        """
        Sets whether or not pre-releases as a whole are allowed by this
        specifier.
        """

    @abc.abstractmethod
    def contains(self, item, prereleases=None):
        """
        Determines if the given item is contained within this specifier.
        """

    @abc.abstractmethod
    def filter(self, iterable, prereleases=None):
        """
        Takes an iterable of items and filters them so that only items which
        are contained within this specifier are allowed in it.
        """


class _IndividualSpecifier(BaseSpecifier):

    _operators = {}

    def __init__(self, spec="", prereleases=None):
        match = self._regex.search(spec)
        if not match:
            raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))

        self._spec = (
            match.group("operator").strip(),
            match.group("version").strip(),
        )

        # Store whether or not this Specifier should accept prereleases
        self._prereleases = prereleases

    def __repr__(self):
        pre = (
            ", prereleases={0!r}".format(self.prereleases)
            if self._prereleases is not None
            else ""
        )

        return "<{0}({1!r}{2})>".format(
            self.__class__.__name__,
            str(self),
            pre,
        )

    def __str__(self):
        return "{0}{1}".format(*self._spec)

    def __hash__(self):
        return hash(self._spec)

    def __eq__(self, other):
        if isinstance(other, string_types):
            try:
                other = self.__class__(other)
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._spec == other._spec

    def __ne__(self, other):
        if isinstance(other, string_types):
            try:
                other = self.__class__(other)
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._spec != other._spec

    def _get_operator(self, op):
        return getattr(self, "_compare_{0}".format(self._operators[op]))

    def _coerce_version(self, version):
        if not isinstance(version, (LegacyVersion, Version)):
            version = parse(version)
        return version

    @property
    def operator(self):
        return self._spec[0]

    @property
    def version(self):
        return self._spec[1]

    @property
    def prereleases(self):
        return self._prereleases

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value

    def __contains__(self, item):
        return self.contains(item)

    def contains(self, item, prereleases=None):
        # Determine if prereleases are to be allowed or not.
        if prereleases is None:
            prereleases = self.prereleases

        # Normalize item to a Version or LegacyVersion, this allows us to have
        # a shortcut for ``"2.0" in Specifier(">=2")
        item = self._coerce_version(item)

        # Determine if we should be supporting prereleases in this specifier
        # or not, if we do not support prereleases than we can short circuit
        # logic if this version is a prereleases.
        if item.is_prerelease and not prereleases:
            return False

        # Actually do the comparison to determine if this item is contained
        # within this Specifier or not.
        return self._get_operator(self.operator)(item, self.version)

    def filter(self, iterable, prereleases=None):
        yielded = False
        found_prereleases = []

        kw = {"prereleases": prereleases if prereleases is not None else True}

        # Attempt to iterate over all the values in the iterable and if any of
        # them match, yield them.
        for version in iterable:
            parsed_version = self._coerce_version(version)

            if self.contains(parsed_version, **kw):
                # If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later incase nothing
                # else matches this specifier.
                if (parsed_version.is_prerelease and not
                        (prereleases or self.prereleases)):
                    found_prereleases.append(version)
                # Either this is not a prerelease, or we should have been
                # accepting prereleases from the begining.
                else:
                    yielded = True
                    yield version

        # Now that we've iterated over everything, determine if we've yielded
        # any values, and if we have not and we have any prereleases stored up
        # then we will go ahead and yield the prereleases.
        if not yielded and found_prereleases:
            for version in found_prereleases:
                yield version


class LegacySpecifier(_IndividualSpecifier):

    _regex_str = (
        r"""
        (?P(==|!=|<=|>=|<|>))
        \s*
        (?P
            [^,;\s)]* # Since this is a "legacy" specifier, and the version
                      # string can be just about anything, we match everything
                      # except for whitespace, a semi-colon for marker support,
                      # a closing paren since versions can be enclosed in
                      # them, and a comma since it's a version separator.
        )
        """
    )

    _regex = re.compile(
        r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    _operators = {
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
    }

    def _coerce_version(self, version):
        if not isinstance(version, LegacyVersion):
            version = LegacyVersion(str(version))
        return version

    def _compare_equal(self, prospective, spec):
        return prospective == self._coerce_version(spec)

    def _compare_not_equal(self, prospective, spec):
        return prospective != self._coerce_version(spec)

    def _compare_less_than_equal(self, prospective, spec):
        return prospective <= self._coerce_version(spec)

    def _compare_greater_than_equal(self, prospective, spec):
        return prospective >= self._coerce_version(spec)

    def _compare_less_than(self, prospective, spec):
        return prospective < self._coerce_version(spec)

    def _compare_greater_than(self, prospective, spec):
        return prospective > self._coerce_version(spec)


def _require_version_compare(fn):
    @functools.wraps(fn)
    def wrapped(self, prospective, spec):
        if not isinstance(prospective, Version):
            return False
        return fn(self, prospective, spec)
    return wrapped


class Specifier(_IndividualSpecifier):

    _regex_str = (
        r"""
        (?P(~=|==|!=|<=|>=|<|>|===))
        (?P
            (?:
                # The identity operators allow for an escape hatch that will
                # do an exact string match of the version you wish to install.
                # This will not be parsed by PEP 440 and we cannot determine
                # any semantic meaning from it. This operator is discouraged
                # but included entirely as an escape hatch.
                (?<====)  # Only match for the identity operator
                \s*
                [^\s]*    # We just match everything, except for whitespace
                          # since we are only testing for strict identity.
            )
            |
            (?:
                # The (non)equality operators allow for wild card and local
                # versions to be specified so we have to define these two
                # operators separately to enable that.
                (?<===|!=)            # Only match for equals and not equals

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?

                # You cannot use a wild card and a dev or local version
                # together so group them with a | and make them optional.
                (?:
                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
                    |
                    \.\*  # Wild card syntax of .*
                )?
            )
            |
            (?:
                # The compatible operator requires at least two digits in the
                # release segment.
                (?<=~=)               # Only match for the compatible operator

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
            |
            (?:
                # All other operators only allow a sub set of what the
                # (non)equality operators do. Specifically they do not allow
                # local versions to be specified nor do they allow the prefix
                # matching wild cards.
                (?=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
        "===": "arbitrary",
    }

    @_require_version_compare
    def _compare_compatible(self, prospective, spec):
        # Compatible releases have an equivalent combination of >= and ==. That
        # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
        # implement this in terms of the other specifiers instead of
        # implementing it ourselves. The only thing we need to do is construct
        # the other specifiers.

        # We want everything but the last item in the version, but we want to
        # ignore post and dev releases and we want to treat the pre-release as
        # it's own separate segment.
        prefix = ".".join(
            list(
                itertools.takewhile(
                    lambda x: (not x.startswith("post") and not
                               x.startswith("dev")),
                    _version_split(spec),
                )
            )[:-1]
        )

        # Add the prefix notation to the end of our string
        prefix += ".*"

        return (self._get_operator(">=")(prospective, spec) and
                self._get_operator("==")(prospective, prefix))

    @_require_version_compare
    def _compare_equal(self, prospective, spec):
        """Handle ``==``, including the ``==X.Y.*`` prefix-matching form."""
        if not spec.endswith(".*"):
            # Plain equality: parse the spec string into a Version.
            spec_version = Version(spec)

            # When the specifier carries no local segment, compare as if the
            # prospective version had none either.
            if not spec_version.local:
                prospective = Version(prospective.public)

            return prospective == spec_version

        # Prefix matching (``==X.Y.*``): the local segment is always ignored.
        prospective = Version(prospective.public)

        # Split both sides on dots, treating a pre-release marker as if it
        # were its own dot-separated segment.
        spec_segments = _version_split(spec[:-2])  # drop the trailing ".*"
        candidate = _version_split(str(prospective))

        # Only the leading portion of the prospective version can match the
        # prefix, so truncate it to the spec's length.
        candidate = candidate[:len(spec_segments)]

        # Zero-pad both sides to a common release-segment length, then
        # compare segment lists for equality.
        spec_segments, candidate = _pad_version(spec_segments, candidate)
        return candidate == spec_segments

    @_require_version_compare
    def _compare_not_equal(self, prospective, spec):
        """Handle ``!=`` as the logical negation of ``==``."""
        return not self._compare_equal(prospective, spec)

    @_require_version_compare
    def _compare_less_than_equal(self, prospective, spec):
        """Handle ``<=`` with a plain PEP 440 Version comparison."""
        spec_version = Version(spec)
        return prospective <= spec_version

    @_require_version_compare
    def _compare_greater_than_equal(self, prospective, spec):
        """Handle ``>=`` with a plain PEP 440 Version comparison."""
        spec_version = Version(spec)
        return prospective >= spec_version

    @_require_version_compare
    def _compare_less_than(self, prospective, spec):
        """Handle ``<``, excluding pre-releases of the spec's own version."""
        # Work with the spec as a parsed Version from here on.
        spec_version = Version(spec)

        # Short-circuit: unless the prospective version is strictly smaller
        # there is nothing more to check.
        if not prospective < spec_version:
            return False

        # Unless the specifier itself names a pre-release, reject any
        # pre-release of the exact version mentioned in the specifier
        # (e.g. <3.1 must not match 3.1.dev0, though it matches 3.0.dev0).
        if not spec_version.is_prerelease and prospective.is_prerelease:
            same_base = (
                Version(prospective.base_version) ==
                Version(spec_version.base_version)
            )
            if same_base:
                return False

        # Strictly smaller, and not a pre-release of the spec's own version.
        return True

    @_require_version_compare
    def _compare_greater_than(self, prospective, spec):
        """Handle ``>``, excluding post-releases and local versions of the
        spec's own version."""
        # Work with the spec as a parsed Version from here on.
        spec_version = Version(spec)

        # Short-circuit: unless the prospective version is strictly larger
        # there is nothing more to check.
        if not prospective > spec_version:
            return False

        # Both special cases below only apply when the two versions share
        # the same base version, so compute that once.
        same_base = (
            Version(prospective.base_version) ==
            Version(spec_version.base_version)
        )

        # Unless the specifier itself names a post-release, reject any
        # post-release of the exact version mentioned in the specifier
        # (e.g. >3.1 must not match 3.1.post0, though it matches 3.2.post0).
        if not spec_version.is_postrelease and prospective.is_postrelease:
            if same_base:
                return False

        # A local version of the spec's version is technically greater, but
        # still must not match.
        if prospective.local is not None:
            if same_base:
                return False

        return True

    def _compare_arbitrary(self, prospective, spec):
        return str(prospective).lower() == str(spec).lower()

    @property
    def prereleases(self):
        """Whether this specifier permits pre-release versions."""
        # An explicit override always wins.
        if self._prereleases is not None:
            return self._prereleases

        # Otherwise, an inclusive operator whose version is itself a
        # pre-release implicitly opts in to pre-releases.
        operator, version = self._spec
        if operator in ("==", ">=", "<=", "~=", "==="):
            # A == specifier may carry a trailing .* which must be stripped
            # before the version can be parsed.
            if operator == "==" and version.endswith(".*"):
                version = version[:-2]

            if parse(version).is_prerelease:
                return True

        return False

    @prereleases.setter
    def prereleases(self, value):
        # Explicitly force (or, with None, clear) pre-release acceptance.
        self._prereleases = value


_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")


def _version_split(version):
    result = []
    for item in version.split("."):
        match = _prefix_regex.search(item)
        if match:
            result.extend(match.groups())
        else:
            result.append(item)
    return result


def _pad_version(left, right):
    left_split, right_split = [], []

    # Get the release segment of our versions
    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))

    # Get the rest of our versions
    left_split.append(left[len(left_split[0]):])
    right_split.append(right[len(right_split[0]):])

    # Insert our padding
    left_split.insert(
        1,
        ["0"] * max(0, len(right_split[0]) - len(left_split[0])),
    )
    right_split.insert(
        1,
        ["0"] * max(0, len(left_split[0]) - len(right_split[0])),
    )

    return (
        list(itertools.chain(*left_split)),
        list(itertools.chain(*right_split)),
    )


class SpecifierSet(BaseSpecifier):
    """A set of version specifiers (e.g. ``">=1.0,!=1.3"``), all of which
    must be satisfied (logical AND)."""

    def __init__(self, specifiers="", prereleases=None):
        """
        :param specifiers: comma-separated specifier string, e.g. ``">=1.0,<2"``.
        :param prereleases: explicit override for whether pre-releases are
            allowed; ``None`` means "infer from the individual specifiers".
        """
        # Split on , to break each individual specifier into its own item,
        # and strip each item to remove leading/trailing whitespace.
        split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]

        # Parse each individual specifier, attempting first to make it a
        # Specifier and falling back to a LegacySpecifier.
        parsed = set()
        for specifier in split_specifiers:
            try:
                parsed.add(Specifier(specifier))
            except InvalidSpecifier:
                parsed.add(LegacySpecifier(specifier))

        # Turn our parsed specifiers into a frozen set and save them for
        # later.
        self._specs = frozenset(parsed)

        # Store our prereleases value so we can use it later to determine if
        # we accept prereleases or not.
        self._prereleases = prereleases

    def __repr__(self):
        pre = (
            ", prereleases={0!r}".format(self.prereleases)
            if self._prereleases is not None
            else ""
        )

        # Bug fix: the format string was empty ('"".format(...)'), so repr()
        # always returned "". Restore the conventional "<SpecifierSet(...)>"
        # form.
        return "<SpecifierSet({0!r}{1})>".format(str(self), pre)

    def __str__(self):
        return ",".join(sorted(str(s) for s in self._specs))

    def __hash__(self):
        return hash(self._specs)

    def __and__(self, other):
        """Combine two sets: the result contains the union of both sets'
        specifiers, so it matches only versions both sets accept."""
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        specifier = SpecifierSet()
        specifier._specs = frozenset(self._specs | other._specs)

        # Merge the explicit prerelease overrides; conflicting True/False
        # overrides cannot be reconciled.
        if self._prereleases is None and other._prereleases is not None:
            specifier._prereleases = other._prereleases
        elif self._prereleases is not None and other._prereleases is None:
            specifier._prereleases = self._prereleases
        elif self._prereleases == other._prereleases:
            specifier._prereleases = self._prereleases
        else:
            raise ValueError(
                "Cannot combine SpecifierSets with True and False prerelease "
                "overrides."
            )

        return specifier

    def __eq__(self, other):
        # Strings and individual specifiers are coerced into a SpecifierSet
        # before comparing the underlying frozensets.
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif isinstance(other, _IndividualSpecifier):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs == other._specs

    def __ne__(self, other):
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif isinstance(other, _IndividualSpecifier):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs != other._specs

    def __len__(self):
        return len(self._specs)

    def __iter__(self):
        return iter(self._specs)

    @property
    def prereleases(self):
        """Whether this set accepts pre-release versions (True/False/None)."""
        # If we have been given an explicit prerelease modifier, then we'll
        # pass that through here.
        if self._prereleases is not None:
            return self._prereleases

        # With no specifiers and no override the policy is unknown, so
        # return None rather than guessing.
        if not self._specs:
            return None

        # Otherwise pre-releases are accepted if any of the individual
        # specifiers accepts them.
        return any(s.prereleases for s in self._specs)

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value

    def __contains__(self, item):
        return self.contains(item)

    def contains(self, item, prereleases=None):
        """Return True if *item* satisfies every specifier in the set.

        :param item: a version string or a Version/LegacyVersion instance.
        :param prereleases: per-call override of the pre-release policy.
        """
        # Ensure that our item is a Version or LegacyVersion instance.
        if not isinstance(item, (LegacyVersion, Version)):
            item = parse(item)

        # Determine if we're forcing a prerelease or not; if we're not
        # forcing one for this particular call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support
        # prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # If pre-releases are not allowed and the item is a pre-release,
        # reject it immediately without consulting the individual
        # specifiers.
        if not prereleases and item.is_prerelease:
            return False

        # We simply dispatch to the underlying specs here to make sure that
        # the given version is contained within all of them.
        # Note: This use of all() here means that an empty set of specifiers
        #       will always return True, this is an explicit design decision.
        return all(
            s.contains(item, prereleases=prereleases)
            for s in self._specs
        )

    def filter(self, iterable, prereleases=None):
        """Filter *iterable* down to the versions that satisfy every
        specifier in the set.

        :param prereleases: per-call override of the pre-release policy.
        """
        # Determine if we're forcing a prerelease or not; if we're not
        # forcing one for this particular call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support
        # prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # If we have any specifiers, then we want to wrap our iterable in
        # the filter method for each one; this acts as a logical AND amongst
        # the specifiers.
        if self._specs:
            for spec in self._specs:
                iterable = spec.filter(iterable, prereleases=bool(prereleases))
            return iterable
        # If we do not have any specifiers, then we need to have a rough
        # filter which will filter out any pre-releases (unless there are no
        # final releases) and which will filter out LegacyVersion in
        # general.
        else:
            filtered = []
            found_prereleases = []

            for item in iterable:
                # Ensure that we have some kind of Version class for this
                # item.
                if not isinstance(item, (LegacyVersion, Version)):
                    parsed_version = parse(item)
                else:
                    parsed_version = item

                # Filter out any item which is parsed as a LegacyVersion
                if isinstance(parsed_version, LegacyVersion):
                    continue

                # Store any item which is a pre-release for later, unless
                # we've already found a final version or we are accepting
                # prereleases.
                if parsed_version.is_prerelease and not prereleases:
                    if not filtered:
                        found_prereleases.append(item)
                else:
                    filtered.append(item)

            # If we've found no items except for pre-releases, then we'll go
            # ahead and use the pre-releases.
            if not filtered and found_prereleases and prereleases is None:
                return found_prereleases

            return filtered
PKtge[08 8 packaging/markers.pynu[# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import operator
import os
import platform
import sys

from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from pkg_resources.extern.pyparsing import Literal as L  # noqa

from ._compat import string_types
from .specifiers import Specifier, InvalidSpecifier


__all__ = [
    "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
    "Marker", "default_environment",
]


class InvalidMarker(ValueError):
    """
    An invalid marker was found; users should refer to PEP 508 for the
    environment-marker grammar.
    """


class UndefinedComparison(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it
    (raised by _eval_op when no comparison operator applies).
    """


class UndefinedEnvironmentName(ValueError):
    """
    A marker referenced a variable name that does not exist inside of the
    evaluation environment.
    """


class Node(object):
    """Base class for a parsed marker element, wrapping a single value.

    Subclasses must implement :meth:`serialize` to render the element back
    into marker syntax.
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        name = type(self).__name__
        return "<{0}({1!r})>".format(name, str(self))

    def serialize(self):
        """Render this element in marker syntax; subclass responsibility."""
        raise NotImplementedError


class Variable(Node):
    # A marker variable reference (e.g. "python_version", "os_name").

    def serialize(self):
        """Serialize as the bare (unquoted) variable name."""
        return str(self)


class Value(Node):
    # A literal string value appearing in a marker expression.

    def serialize(self):
        """Serialize as a double-quoted string literal."""
        return '"{0}"'.format(self)


class Op(Node):
    # A comparison operator (e.g. "==", ">=", "in", "not in") in a marker.

    def serialize(self):
        """Serialize as the bare operator text."""
        return str(self)


# pyparsing grammar for PEP 508 environment markers.
#
# The recognized marker variable names. pyparsing's "|" (MatchFirst) takes
# the first alternative that matches, so longer names that share a prefix
# with shorter ones are listed first.
VARIABLE = (
    L("implementation_version") |
    L("platform_python_implementation") |
    L("implementation_name") |
    L("python_full_version") |
    L("platform_release") |
    L("platform_version") |
    L("platform_machine") |
    L("platform_system") |
    L("python_version") |
    L("sys_platform") |
    L("os_name") |
    L("os.name") |  # PEP-345
    L("sys.platform") |  # PEP-345
    L("platform.version") |  # PEP-345
    L("platform.machine") |  # PEP-345
    L("platform.python_implementation") |  # PEP-345
    L("python_implementation") |  # undocumented setuptools legacy
    L("extra")
)
# Map the dotted PEP-345 spellings (and the setuptools legacy name) onto
# their canonical PEP 508 equivalents.
ALIASES = {
    'os.name': 'os_name',
    'sys.platform': 'sys_platform',
    'platform.version': 'platform_version',
    'platform.machine': 'platform_machine',
    'platform.python_implementation': 'platform_python_implementation',
    'python_implementation': 'platform_python_implementation'
}
# Wrap matched variable names in Variable nodes, normalizing aliases.
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))

# Version comparison operators. "===" precedes "==", and ">=" / "<="
# precede ">" / "<", so the longest operator wins.
VERSION_CMP = (
    L("===") |
    L("==") |
    L(">=") |
    L("<=") |
    L("!=") |
    L("~=") |
    L(">") |
    L("<")
)

MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))

# String literals may use either single or double quotes.
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))

BOOLOP = L("and") | L("or")

MARKER_VAR = VARIABLE | MARKER_VALUE

# A single comparison, e.g. python_version >= "3.4"; coerced to a
# (lhs, op, rhs) tuple of Node instances.
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))

LPAREN = L("(").suppress()
RPAREN = L(")").suppress()

# Comparisons combined with and/or, with parenthesized grouping allowed.
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)

# The full marker grammar: a complete expression spanning the whole string.
MARKER = stringStart + MARKER_EXPR + stringEnd


def _coerce_parse_result(results):
    """Recursively convert a pyparsing ParseResults tree into plain lists,
    leaving non-ParseResults leaves (tuples, strings) untouched."""
    if not isinstance(results, ParseResults):
        return results
    return [_coerce_parse_result(child) for child in results]


def _format_marker(marker, first=True):
    """Render a coerced marker structure back into marker-syntax text.

    *first* is True only at the outermost level, where the surrounding
    parentheses are omitted.
    """
    assert isinstance(marker, (list, tuple, string_types))

    # A [[...]] single-item wrapper is flattened first so we don't emit a
    # redundant pair of parentheses around the whole expression.
    if (isinstance(marker, list) and len(marker) == 1 and
            isinstance(marker[0], (list, tuple))):
        return _format_marker(marker[0])

    if isinstance(marker, list):
        rendered = " ".join(_format_marker(m, first=False) for m in marker)
        if first:
            return rendered
        return "(" + rendered + ")"
    if isinstance(marker, tuple):
        # A single comparison: serialize each Node (lhs, op, rhs).
        return " ".join(node.serialize() for node in marker)
    # A bare "and" / "or" string.
    return marker


# Fallback comparison callables used by _eval_op when the right-hand side
# cannot be interpreted as a PEP 440 version specifier.
_operators = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _eval_op(lhs, op, rhs):
    """Apply a single marker comparison, preferring PEP 440 semantics.

    Raises UndefinedComparison when the operator applies to neither a
    version specifier nor the plain fallback operators.
    """
    # First try to interpret "op rhs" as a version specifier, which gives
    # proper PEP 440 comparison semantics.
    try:
        spec = Specifier(op.serialize() + rhs)
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)

    # Otherwise fall back to plain comparison / membership operators.
    func = _operators.get(op.serialize())
    if func is None:
        raise UndefinedComparison(
            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
        )

    return func(lhs, rhs)


# Sentinel distinguishing "name missing from the environment" from a
# legitimately stored None value in _get_env.
_undefined = object()


def _get_env(environment, name):
    """Look up *name* in the *environment* mapping.

    Raises UndefinedEnvironmentName when the marker references a variable
    the environment does not define.
    """
    value = environment.get(name, _undefined)
    if value is not _undefined:
        return value

    raise UndefinedEnvironmentName(
        "{0!r} does not exist in evaluation environment.".format(name)
    )


def _evaluate_markers(markers, environment):
    """Evaluate a coerced marker structure against *environment*.

    "and" keeps appending to the current group while "or" opens a new one,
    so the overall result is any(all(group) for each group).
    """
    or_groups = [[]]

    for node in markers:
        assert isinstance(node, (list, tuple, string_types))

        if isinstance(node, list):
            # Parenthesized sub-expression: evaluate recursively.
            or_groups[-1].append(_evaluate_markers(node, environment))
        elif isinstance(node, tuple):
            lhs, op, rhs = node

            # Exactly one side is a Variable: resolve it from the
            # environment and take the other side's literal value.
            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)

            or_groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert node in ["and", "or"]
            if node == "or":
                or_groups.append([])

    return any(all(group) for group in or_groups)


def format_full_version(info):
    """Format a ``sys.implementation.version``-style struct as a version
    string, e.g. "3.6.0" or "3.6.0b4" for a non-final release."""
    version = '{0.major}.{0.minor}.{0.micro}'.format(info)
    kind = info.releaselevel
    if kind != 'final':
        # Non-final releases append the level's first letter and the serial,
        # e.g. releaselevel="beta", serial=4 -> "b4".
        version += kind[0] + str(info.serial)
    return version


def default_environment():
    """Return the PEP 508 marker environment describing the running
    interpreter and platform."""
    if hasattr(sys, 'implementation'):
        iver = format_full_version(sys.implementation.version)
        implementation_name = sys.implementation.name
    else:
        # Python 2 has no sys.implementation.
        iver = '0'
        implementation_name = ''

    return {
        "implementation_name": implementation_name,
        "implementation_version": iver,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        # Bug fix: slicing python_version() to 3 characters truncates
        # two-digit minor versions ("3.10.1" -> "3.1"). Join the first two
        # components of the version tuple instead.
        "python_version": ".".join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }


class Marker(object):
    """A parsed PEP 508 environment marker (e.g. ``python_version >= "3.4"``)."""

    def __init__(self, marker):
        """Parse *marker*; raises InvalidMarker on a syntax error."""
        try:
            self._markers = _coerce_parse_result(MARKER.parseString(marker))
        except ParseException as e:
            # Include a small window of the input around the failure point.
            err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
                marker, marker[e.loc:e.loc + 8])
            raise InvalidMarker(err_str)

    def __str__(self):
        return _format_marker(self._markers)

    def __repr__(self):
        # Bug fix: the format string was empty ('"".format(...)'), so repr()
        # always returned "". Restore the conventional "<Marker(...)>" form.
        return "<Marker({0!r})>".format(str(self))

    def evaluate(self, environment=None):
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        current_environment = default_environment()
        if environment is not None:
            current_environment.update(environment)

        return _evaluate_markers(self._markers, current_environment)
PKtge['packaging/utils.pynu[# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import re


# Collapses any run of dashes, underscores, and dots into a single dash.
_canonicalize_regex = re.compile(r"[-_.]+")


def canonicalize_name(name):
    """Normalize a project name per PEP 503: runs of ``-``, ``_``, and ``.``
    become a single ``-``, and the result is lowercased."""
    normalized = _canonicalize_regex.sub("-", name)
    return normalized.lower()
PKtge[{

6packaging/__pycache__/_structures.cpython-36.opt-1.pycnu[3

vh@sDddlmZmZmZGdddeZeZGdddeZeZdS))absolute_importdivisionprint_functionc@sTeZdZddZddZddZddZd	d
ZddZd
dZ	ddZ
ddZdS)InfinitycCsdS)Nr)selfrr!/usr/lib/python3.6/_structures.py__repr__	szInfinity.__repr__cCstt|S)N)hashrepr)rrrr__hash__szInfinity.__hash__cCsdS)NFr)rotherrrr__lt__szInfinity.__lt__cCsdS)NFr)rr
rrr__le__szInfinity.__le__cCst||jS)N)
isinstance	__class__)rr
rrr__eq__szInfinity.__eq__cCst||jS)N)rr)rr
rrr__ne__szInfinity.__ne__cCsdS)NTr)rr
rrr__gt__szInfinity.__gt__cCsdS)NTr)rr
rrr__ge__szInfinity.__ge__cCstS)N)NegativeInfinity)rrrr__neg__!szInfinity.__neg__N)__name__
__module____qualname__r	rrrrrrrrrrrrrsrc@sTeZdZddZddZddZddZd	d
ZddZd
dZ	ddZ
ddZdS)rcCsdS)Nz	-Infinityr)rrrrr	)szNegativeInfinity.__repr__cCstt|S)N)r
r)rrrrr,szNegativeInfinity.__hash__cCsdS)NTr)rr
rrrr/szNegativeInfinity.__lt__cCsdS)NTr)rr
rrrr2szNegativeInfinity.__le__cCst||jS)N)rr)rr
rrrr5szNegativeInfinity.__eq__cCst||jS)N)rr)rr
rrrr8szNegativeInfinity.__ne__cCsdS)NFr)rr
rrrr;szNegativeInfinity.__gt__cCsdS)NFr)rr
rrrr>szNegativeInfinity.__ge__cCstS)N)r)rrrrrAszNegativeInfinity.__neg__N)rrrr	rrrrrrrrrrrrr'srN)Z
__future__rrrobjectrrrrrrsPKtge[tsMM/packaging/__pycache__/specifiers.cpython-36.pycnu[3

vhym@sddlmZmZmZddlZddlZddlZddlZddlm	Z	m
Z
ddlmZm
Z
mZGdddeZGdd	d	e
ejeZGd
ddeZGdd
d
eZddZGdddeZejdZddZddZGdddeZdS))absolute_importdivisionprint_functionN)string_typeswith_metaclass)Version
LegacyVersionparsec@seZdZdZdS)InvalidSpecifierzH
    An invalid specifier was found, users should refer to PEP 440.
    N)__name__
__module____qualname____doc__rr /usr/lib/python3.6/specifiers.pyrsrc@seZdZejddZejddZejddZejddZej	d	d
Z
e
jdd
Z
ejdd
dZejdddZ
dS)
BaseSpecifiercCsdS)z
        Returns the str representation of this Specifier like object. This
        should be representative of the Specifier itself.
        Nr)selfrrr__str__szBaseSpecifier.__str__cCsdS)zF
        Returns a hash value for this Specifier like object.
        Nr)rrrr__hash__szBaseSpecifier.__hash__cCsdS)zq
        Returns a boolean representing whether or not the two Specifier like
        objects are equal.
        Nr)rotherrrr__eq__$szBaseSpecifier.__eq__cCsdS)zu
        Returns a boolean representing whether or not the two Specifier like
        objects are not equal.
        Nr)rrrrr__ne__+szBaseSpecifier.__ne__cCsdS)zg
        Returns whether or not pre-releases as a whole are allowed by this
        specifier.
        Nr)rrrrprereleases2szBaseSpecifier.prereleasescCsdS)zd
        Sets whether or not pre-releases as a whole are allowed by this
        specifier.
        Nr)rvaluerrrr9sNcCsdS)zR
        Determines if the given item is contained within this specifier.
        Nr)ritemrrrrcontains@szBaseSpecifier.containscCsdS)z
        Takes an iterable of items and filters them so that only items which
        are contained within this specifier are allowed in it.
        Nr)riterablerrrrfilterFszBaseSpecifier.filter)N)N)rr
rabcabstractmethodrrrrabstractpropertyrsetterrrrrrrrsrc@seZdZiZd ddZddZddZd	d
ZddZd
dZ	ddZ
ddZeddZ
eddZeddZejddZddZd!ddZd"ddZdS)#_IndividualSpecifierNcCsF|jj|}|stdj||jdj|jdjf|_||_dS)NzInvalid specifier: '{0}'operatorversion)_regexsearchrformatgroupstrip_spec_prereleases)rspecrmatchrrr__init__Rsz_IndividualSpecifier.__init__cCs0|jdk	rdj|jnd}dj|jjt||S)Nz, prereleases={0!r}r$z<{0}({1!r}{2})>)r-r)r	__class__rstr)rprerrr__repr___sz_IndividualSpecifier.__repr__cCsdj|jS)Nz{0}{1})r)r,)rrrrrlsz_IndividualSpecifier.__str__cCs
t|jS)N)hashr,)rrrrrosz_IndividualSpecifier.__hash__cCsLt|tr0y|j|}Wq@tk
r,tSXnt||js@tS|j|jkS)N)
isinstancerr1rNotImplementedr,)rrrrrrrs
z_IndividualSpecifier.__eq__cCsLt|tr0y|j|}Wq@tk
r,tSXnt||js@tS|j|jkS)N)r6rr1rr7r,)rrrrrr}s
z_IndividualSpecifier.__ne__cCst|dj|j|S)Nz_compare_{0})getattrr)
_operators)roprrr
_get_operatorsz"_IndividualSpecifier._get_operatorcCst|ttfst|}|S)N)r6r	rr
)rr&rrr_coerce_versionsz$_IndividualSpecifier._coerce_versioncCs
|jdS)Nr)r,)rrrrr%sz_IndividualSpecifier.operatorcCs
|jdS)Nr)r,)rrrrr&sz_IndividualSpecifier.versioncCs|jS)N)r-)rrrrrsz _IndividualSpecifier.prereleasescCs
||_dS)N)r-)rrrrrrscCs
|j|S)N)r)rrrrr__contains__sz!_IndividualSpecifier.__contains__cCs<|dkr|j}|j|}|jr(|r(dS|j|j||jS)NF)rr<
is_prereleaser;r%r&)rrrrrrrs
z_IndividualSpecifier.containsccsd}g}d|dk	r|ndi}xL|D]D}|j|}|j|f|r"|jr\|pL|jr\|j|q"d}|Vq"W|r|rx|D]
}|VqzWdS)NFrT)r<rr>rappend)rrrZyieldedfound_prereleaseskwr&parsed_versionrrrrs




z_IndividualSpecifier.filter)r$N)N)N)rr
rr9r0r4rrrrr;r<propertyr%r&rr"r=rrrrrrr#Ns 



r#c@sveZdZdZejdedejejBZdddddd	d
Z	ddZ
d
dZddZddZ
ddZddZddZdS)LegacySpecifiera
        (?P(==|!=|<=|>=|<|>))
        \s*
        (?P
            [^,;\s)]* # Since this is a "legacy" specifier, and the version
                      # string can be just about anything, we match everything
                      # except for whitespace, a semi-colon for marker support,
                      # a closing paren since versions can be enclosed in
                      # them, and a comma since it's a version separator.
        )
        z^\s*z\s*$equal	not_equalless_than_equalgreater_than_equal	less_thangreater_than)z==z!=z<=z>=<>cCst|tstt|}|S)N)r6r	r2)rr&rrrr<s
zLegacySpecifier._coerce_versioncCs||j|kS)N)r<)rprospectiver.rrr_compare_equalszLegacySpecifier._compare_equalcCs||j|kS)N)r<)rrMr.rrr_compare_not_equalsz"LegacySpecifier._compare_not_equalcCs||j|kS)N)r<)rrMr.rrr_compare_less_than_equalsz(LegacySpecifier._compare_less_than_equalcCs||j|kS)N)r<)rrMr.rrr_compare_greater_than_equalsz+LegacySpecifier._compare_greater_than_equalcCs||j|kS)N)r<)rrMr.rrr_compare_less_thansz"LegacySpecifier._compare_less_thancCs||j|kS)N)r<)rrMr.rrr_compare_greater_thansz%LegacySpecifier._compare_greater_thanN)rr
r
_regex_strrecompileVERBOSE
IGNORECASEr'r9r<rNrOrPrQrRrSrrrrrDs 
rDcstjfdd}|S)Ncst|tsdS|||S)NF)r6r)rrMr.)fnrrwrappeds
z)_require_version_compare..wrapped)	functoolswraps)rYrZr)rYr_require_version_compare
sr]c	@seZdZdZejdedejejBZdddddd	d
ddZ	e
d
dZe
ddZe
ddZ
e
ddZe
ddZe
ddZe
ddZddZeddZejddZd S)!	Specifiera
        (?P(~=|==|!=|<=|>=|<|>|===))
        (?P
            (?:
                # The identity operators allow for an escape hatch that will
                # do an exact string match of the version you wish to install.
                # This will not be parsed by PEP 440 and we cannot determine
                # any semantic meaning from it. This operator is discouraged
                # but included entirely as an escape hatch.
                (?<====)  # Only match for the identity operator
                \s*
                [^\s]*    # We just match everything, except for whitespace
                          # since we are only testing for strict identity.
            )
            |
            (?:
                # The (non)equality operators allow for wild card and local
                # versions to be specified so we have to define these two
                # operators separately to enable that.
                (?<===|!=)            # Only match for equals and not equals

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?

                # You cannot use a wild card and a dev or local version
                # together so group them with a | and make them optional.
                (?:
                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
                    |
                    \.\*  # Wild card syntax of .*
                )?
            )
            |
            (?:
                # The compatible operator requires at least two digits in the
                # release segment.
                (?<=~=)               # Only match for the compatible operator

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
            |
            (?:
                # All other operators only allow a sub set of what the
                # (non)equality operators do. Specifically they do not allow
                # local versions to be specified nor do they allow the prefix
                # matching wild cards.
                (?=rKrLz===cCsNdjttjddt|dd}|d7}|jd||oL|jd||S)	N.cSs|jdo|jdS)NZpostZdev)
startswith)xrrrsz/Specifier._compare_compatible..rz.*z>=z==)joinlist	itertools	takewhile_version_splitr;)rrMr.prefixrrr_compare_compatibles
zSpecifier._compare_compatiblecCsp|jdrPt|j}t|dd}tt|}|dt|}t||\}}nt|}|jsht|j}||kS)Nz.*)endswithrZpublicrhr2len_pad_versionlocal)rrMr.rrrrNs


zSpecifier._compare_equalcCs|j||S)N)rN)rrMr.rrrrOszSpecifier._compare_not_equalcCs|t|kS)N)r)rrMr.rrrrPsz"Specifier._compare_less_than_equalcCs|t|kS)N)r)rrMr.rrrrQsz%Specifier._compare_greater_than_equalcCs>t|}||ksdS|jr:|jr:t|jt|jkr:dSdS)NFT)rr>base_version)rrMr.rrrrRszSpecifier._compare_less_thancCs`t|}||ksdS|jr:|jr:t|jt|jkr:dS|jdk	r\t|jt|jkr\dSdS)NFT)rZis_postreleaserqrp)rrMr.rrrrSs
zSpecifier._compare_greater_thancCst|jt|jkS)N)r2lower)rrMr.rrr_compare_arbitraryszSpecifier._compare_arbitrarycCsR|jdk	r|jS|j\}}|d
krN|dkr@|jdr@|dd}t|jrNdSd	S)N==>=<=~====z.*rkTF)rtrurvrwrxrl)r-r,rmr
r>)rr%r&rrrrs


zSpecifier.prereleasescCs
||_dS)N)r-)rrrrrrsN)rr
rrTrUrVrWrXr'r9r]rjrNrOrPrQrRrSrsrCrr"rrrrr^s*^#r^z^([0-9]+)((?:a|b|c|rc)[0-9]+)$cCsDg}x:|jdD],}tj|}|r2|j|jq|j|qW|S)Nr_)split
_prefix_regexr(extendgroupsr?)r&resultrr/rrrrh's
rhc	Csgg}}|jttjdd||jttjdd||j|t|dd|j|t|dd|jddgtdt|dt|d|jddgtdt|dt|dttj|ttj|fS)NcSs|jS)N)isdigit)rarrrrb6sz_pad_version..cSs|jS)N)r~)rarrrrb7srr0)r?rerfrgrninsertmaxchain)leftrightZ
left_splitZright_splitrrrro2s
&&roc@seZdZdddZddZddZd	d
ZddZd
dZddZ	ddZ
ddZeddZ
e
jddZ
ddZdddZd ddZdS)!SpecifierSetr$NcCsrdd|jdD}t}xB|D]:}y|jt|Wq tk
rX|jt|Yq Xq Wt||_||_dS)NcSsg|]}|jr|jqSr)r+).0srrr
Rsz)SpecifierSet.__init__..,)	rysetaddr^rrD	frozenset_specsr-)rZ
specifiersrZparsed	specifierrrrr0Os

zSpecifierSet.__init__cCs*|jdk	rdj|jnd}djt||S)Nz, prereleases={0!r}r$z)r-r)rr2)rr3rrrr4dszSpecifierSet.__repr__cCsdjtdd|jDS)Nrcss|]}t|VqdS)N)r2)rrrrr	nsz'SpecifierSet.__str__..)rdsortedr)rrrrrmszSpecifierSet.__str__cCs
t|jS)N)r5r)rrrrrpszSpecifierSet.__hash__cCst|trt|}nt|ts"tSt}t|j|jB|_|jdkrX|jdk	rX|j|_n<|jdk	rv|jdkrv|j|_n|j|jkr|j|_ntd|S)NzFCannot combine SpecifierSets with True and False prerelease overrides.)r6rrr7rrr-
ValueError)rrrrrr__and__ss





zSpecifierSet.__and__cCsFt|trt|}n&t|tr,tt|}nt|ts:tS|j|jkS)N)r6rrr#r2r7r)rrrrrrs



zSpecifierSet.__eq__cCsFt|trt|}n&t|tr,tt|}nt|ts:tS|j|jkS)N)r6rrr#r2r7r)rrrrrrs



zSpecifierSet.__ne__cCs
t|jS)N)rnr)rrrr__len__szSpecifierSet.__len__cCs
t|jS)N)iterr)rrrr__iter__szSpecifierSet.__iter__cCs.|jdk	r|jS|jsdStdd|jDS)Ncss|]}|jVqdS)N)r)rrrrrrsz+SpecifierSet.prereleases..)r-rany)rrrrrs

zSpecifierSet.prereleasescCs
||_dS)N)r-)rrrrrrscCs
|j|S)N)r)rrrrrr=szSpecifierSet.__contains__csNtttfstdkr$|jr4jr4dStfdd|jDS)NFc3s|]}|jdVqdS))rN)r)rr)rrrrrsz(SpecifierSet.contains..)r6r	rr
rr>allr)rrrr)rrrrszSpecifierSet.containscCs|dkr|j}|jr:x |jD]}|j|t|d}qW|Sg}g}xZ|D]R}t|ttfsdt|}n|}t|trtqH|jr|r|s|j	|qH|j	|qHW|r|r|dkr|S|SdS)N)r)
rrrboolr6r	rr
r>r?)rrrr.Zfilteredr@rrBrrrrs*


zSpecifierSet.filter)r$N)N)N)rr
rr0r4rrrrrrrrCrr"r=rrrrrrrMs
	


r)Z
__future__rrrrr[rfrUZ_compatrrr&rr	r
rrABCMetaobjectrr#rDr]r^rVrzrhrorrrrrs&9	4	
PKtge[v2*!!2packaging/__pycache__/markers.cpython-36.opt-1.pycnu[3

vh8 	@s@ddlmZmZmZddlZddlZddlZddlZddlm	Z	m
Z
mZmZddlm
Z
mZmZmZddlmZddlmZddlmZmZd	d
ddd
gZGdd	d	eZGdd
d
eZGdddeZGdddeZGdddeZGdddeZ GdddeZ!ededBedBedBedBedBedBed Bed!Bed"Bed#Bed$Bed%Bed&Bed'Bed(Bed)Bed*BZ"d#d"ddddd+Z#e"j$d,d-ed.ed/Bed0Bed1Bed2Bed3Bed4Bed5BZ%e%ed6Bed7BZ&e&j$d8d-ed9ed:BZ'e'j$d;d-ed<ed=BZ(e"e'BZ)ee)e&e)Z*e*j$d>d-ed?j+Z,ed@j+Z-eZ.e*ee,e.e-BZ/e.e/e
e(e.>ee.eZ0dAdBZ1dSdDdEZ2dFd-dGd-ej3ej4ej5ej6ej7ej8dHZ9dIdJZ:eZ;dKdLZdQd
Z?GdRddeZ@dS)T)absolute_importdivisionprint_functionN)ParseExceptionParseResultsstringStart	stringEnd)
ZeroOrMoreGroupForwardQuotedString)Literal)string_types)	SpecifierInvalidSpecifier
InvalidMarkerUndefinedComparisonUndefinedEnvironmentNameMarkerdefault_environmentc@seZdZdZdS)rzE
    An invalid marker was found, users should refer to PEP 508.
    N)__name__
__module____qualname____doc__rr/usr/lib/python3.6/markers.pyrsc@seZdZdZdS)rzP
    An invalid operation was attempted on a value that doesn't support it.
    N)rrrrrrrrrsc@seZdZdZdS)rz\
    A name was attempted to be used that does not exist inside of the
    environment.
    N)rrrrrrrrr%sc@s,eZdZddZddZddZddZd	S)
NodecCs
||_dS)N)value)selfrrrr__init__.sz
Node.__init__cCs
t|jS)N)strr)rrrr__str__1szNode.__str__cCsdj|jjt|S)Nz<{0}({1!r})>)format	__class__rr!)rrrr__repr__4sz
Node.__repr__cCstdS)N)NotImplementedError)rrrr	serialize7szNode.serializeN)rrrr r"r%r'rrrrr,src@seZdZddZdS)VariablecCst|S)N)r!)rrrrr'=szVariable.serializeN)rrrr'rrrrr(;sr(c@seZdZddZdS)ValuecCs
dj|S)Nz"{0}")r#)rrrrr'CszValue.serializeN)rrrr'rrrrr)Asr)c@seZdZddZdS)OpcCst|S)N)r!)rrrrr'IszOp.serializeN)rrrr'rrrrr*Gsr*implementation_versionplatform_python_implementationimplementation_namepython_full_versionplatform_releaseplatform_versionplatform_machineplatform_systempython_versionsys_platformos_namezos.namezsys.platformzplatform.versionzplatform.machinezplatform.python_implementationpython_implementationZextra)zos.namezsys.platformzplatform.versionzplatform.machinezplatform.python_implementationr6cCsttj|d|dS)Nr)r(ALIASESget)sltrrrisr<z===z==z>=z<=z!=z~=>sz(_coerce_parse_result..)
isinstancer)resultsrrrrGs
rGTcCst|tr4t|dkr4t|dttfr4t|dSt|trndd|D}|rZdj|Sddj|dSn"t|trdjdd	|DS|SdS)
Nrrcss|]}t|ddVqdS)F)firstN)_format_marker)rHmrrr	sz!_format_marker.. rErFcSsg|]}|jqSr)r')rHrOrrrrJsz"_format_marker..)rKlistlenrDrNjoin)markerrMinnerrrrrNs


rNcCs||kS)Nr)lhsrhsrrrr<scCs||kS)Nr)rWrXrrrr<s)r?znot inr>z<=z==z!=z>=r=c
Cslytdj|j|g}Wntk
r.YnX|j|Stj|j}|dkrbtdj||||||S)Nz#Undefined {0!r} on {1!r} and {2!r}.)	rrTr'rcontains
_operatorsr8rr#)rWoprXspecZoperrrr_eval_ops
r^cCs&|j|t}|tkr"tdj||S)Nz/{0!r} does not exist in evaluation environment.)r8
_undefinedrr#)environmentnamerrrr_get_envs
rbc	Csgg}x|D]}t|tr0|djt||qt|tr|\}}}t|trbt||j}|j}n|j}t||j}|djt|||q|dkr|jgqWt	dd|DS)NrrCcss|]}t|VqdS)N)all)rHitemrrrrPsz$_evaluate_markers..re)
rKrRappend_evaluate_markersrDr(rbrr^any)	Zmarkersr`groupsrUrWr\rXZ	lhs_valueZ	rhs_valuerrrrgs




rgcCs2dj|}|j}|dkr.||dt|j7}|S)Nz{0.major}.{0.minor}.{0.micro}finalr)r#releaselevelr!serial)infoversionZkindrrrformat_full_versions

rocCslttdr ttjj}tjj}nd}d}||tjtjtj	tj
tjtjtjtjddtjdS)Nimplementation0rY)r-r+r5r1r/r2r0r.r,r3r4)
hasattrsysrorprnraosplatformmachinereleasesystemr3r6)Ziverr-rrrrs 

c@s.eZdZddZddZddZd
dd	ZdS)rcCs`yttj||_WnFtk
rZ}z*dj|||j|jd}t|WYdd}~XnXdS)Nz+Invalid marker: {0!r}, parse error at {1!r})rGMARKERZparseString_markersrr#locr)rrUeZerr_strrrrr szMarker.__init__cCs
t|jS)N)rNr|)rrrrr"szMarker.__str__cCsdjt|S)Nz)r#r!)rrrrr%szMarker.__repr__NcCs$t}|dk	r|j|t|j|S)a$Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        N)rupdatergr|)rr`Zcurrent_environmentrrrevaluate s	
zMarker.evaluate)N)rrrr r"r%rrrrrrs)T)AZ
__future__rrroperatorrurvrtZpkg_resources.extern.pyparsingrrrrr	r
rrr
LZ_compatrZ
specifiersrr__all__
ValueErrorrrrobjectrr(r)r*ZVARIABLEr7ZsetParseActionZVERSION_CMPZ	MARKER_OPZMARKER_VALUEZBOOLOPZ
MARKER_VARZMARKER_ITEMsuppressZLPARENZRPARENZMARKER_EXPRZMARKER_ATOMr{rGrNltleeqnegegtr[r^r_rbrgrorrrrrrsx
	6


PKtge[4_))2packaging/__pycache__/version.cpython-36.opt-1.pycnu[3

vh$-@sddlmZmZmZddlZddlZddlZddlmZddddd	gZ	ej
d
ddd
dddgZddZGddde
ZGdddeZGdddeZejdejZddddddZddZddZdZGd ddeZd!d"Zejd#Zd$d%Zd&d'ZdS)()absolute_importdivisionprint_functionN)InfinityparseVersion
LegacyVersionInvalidVersionVERSION_PATTERN_VersionepochreleasedevprepostlocalcCs&yt|Stk
r t|SXdS)z
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on if the given version is
    a valid PEP 440 version or a legacy version.
    N)rr
r	)versionr/usr/lib/python3.6/version.pyrsc@seZdZdZdS)r
zF
    An invalid version was found, users should refer to PEP 440.
    N)__name__
__module____qualname____doc__rrrrr
$sc@sLeZdZddZddZddZddZd	d
ZddZd
dZ	ddZ
dS)_BaseVersioncCs
t|jS)N)hash_key)selfrrr__hash__,sz_BaseVersion.__hash__cCs|j|ddS)NcSs||kS)Nr)sorrr0sz%_BaseVersion.__lt__..)_compare)rotherrrr__lt__/sz_BaseVersion.__lt__cCs|j|ddS)NcSs||kS)Nr)rr rrrr!3sz%_BaseVersion.__le__..)r")rr#rrr__le__2sz_BaseVersion.__le__cCs|j|ddS)NcSs||kS)Nr)rr rrrr!6sz%_BaseVersion.__eq__..)r")rr#rrr__eq__5sz_BaseVersion.__eq__cCs|j|ddS)NcSs||kS)Nr)rr rrrr!9sz%_BaseVersion.__ge__..)r")rr#rrr__ge__8sz_BaseVersion.__ge__cCs|j|ddS)NcSs||kS)Nr)rr rrrr!<sz%_BaseVersion.__gt__..)r")rr#rrr__gt__;sz_BaseVersion.__gt__cCs|j|ddS)NcSs||kS)Nr)rr rrrr!?sz%_BaseVersion.__ne__..)r")rr#rrr__ne__>sz_BaseVersion.__ne__cCst|tstS||j|jS)N)
isinstancerNotImplementedr)rr#methodrrrr"As
z_BaseVersion._compareN)rrrrr$r%r&r'r(r)r"rrrrr*src@s`eZdZddZddZddZeddZed	d
ZeddZ	ed
dZ
eddZdS)r	cCst||_t|j|_dS)N)str_version_legacy_cmpkeyr)rrrrr__init__Js
zLegacyVersion.__init__cCs|jS)N)r.)rrrr__str__NszLegacyVersion.__str__cCsdjtt|S)Nz)formatreprr-)rrrr__repr__QszLegacyVersion.__repr__cCs|jS)N)r.)rrrrpublicTszLegacyVersion.publiccCs|jS)N)r.)rrrrbase_versionXszLegacyVersion.base_versioncCsdS)Nr)rrrrr\szLegacyVersion.localcCsdS)NFr)rrrr
is_prerelease`szLegacyVersion.is_prereleasecCsdS)NFr)rrrris_postreleasedszLegacyVersion.is_postreleaseN)rrrr0r1r4propertyr5r6rr7r8rrrrr	Hsz(\d+ | [a-z]+ | \.| -)czfinal-@)rpreview-rcrccsbxVtj|D]H}tj||}|s|dkr,q|dddkrJ|jdVqd|VqWdVdS)N.r
0123456789*z*final)_legacy_version_component_resplit_legacy_version_replacement_mapgetzfill)rpartrrr_parse_version_partsrsrIcCsd}g}xlt|jD]\}|jdrh|dkrJx|rH|ddkrH|jq.Wx|rf|ddkrf|jqLW|j|qWt|}||fS)	NrrBz*finalz*final-Z00000000rJrJ)rIlower
startswithpopappendtuple)rr
partsrHrrrr/s
r/a
    v?
    (?:
        (?:(?P[0-9]+)!)?                           # epoch
        (?P[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P
                                          # pre-release
            [-_\.]?
            (?P(a|b|c|rc|alpha|beta|pre|preview))
            [-_\.]?
            (?P[0-9]+)?
        )?
        (?P                                         # post release
            (?:-(?P[0-9]+))
            |
            (?:
                [-_\.]?
                (?Ppost|rev|r)
                [-_\.]?
                (?P[0-9]+)?
            )
        )?
        (?P                                          # dev release
            [-_\.]?
            (?Pdev)
            [-_\.]?
            (?P[0-9]+)?
        )?
    )
    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
c@s|eZdZejdedejejBZddZ	ddZ
ddZed	d
Z
eddZed
dZeddZeddZdS)rz^\s*z\s*$c	Cs|jj|}|stdj|t|jdr8t|jdndtdd|jdjdDt	|jd|jd	t	|jd
|jdp|jdt	|jd
|jdt
|jdd|_t|jj
|jj|jj|jj|jj|jj|_dS)NzInvalid version: '{0}'r
rcss|]}t|VqdS)N)int).0irrr	sz#Version.__init__..rr?Zpre_lZpre_nZpost_lZpost_n1Zpost_n2Zdev_lZdev_nr)r
rrrrr)_regexsearchr
r2rgrouprQrOrD_parse_letter_version_parse_local_versionr._cmpkeyr
rrrrrr)rrmatchrrrr0s.

zVersion.__init__cCsdjtt|S)Nz)r2r3r-)rrrrr4szVersion.__repr__cCsg}|jjdkr$|jdj|jj|jdjdd|jjD|jjdk	rl|jdjdd|jjD|jjdk	r|jdj|jjd	|jjdk	r|jd
j|jjd	|jj	dk	r|jdjdjdd|jj	Ddj|S)
Nrz{0}!r?css|]}t|VqdS)N)r-)rRxrrrrTsz"Version.__str__..css|]}t|VqdS)N)r-)rRr\rrrrTsz.post{0}rz.dev{0}z+{0}css|]}t|VqdS)N)r-)rRr\rrrrTs)
r.r
rNr2joinrrrrr)rrPrrrr1s zVersion.__str__cCst|jdddS)N+rr)r-rD)rrrrr5
szVersion.publiccCsLg}|jjdkr$|jdj|jj|jdjdd|jjDdj|S)Nrz{0}!r?css|]}t|VqdS)N)r-)rRr\rrrrTsz'Version.base_version..r])r.r
rNr2r^r)rrPrrrr6s
zVersion.base_versioncCs$t|}d|kr |jdddSdS)Nr_r)r-rD)rZversion_stringrrrrsz
Version.localcCst|jjp|jjS)N)boolr.rr)rrrrr7!szVersion.is_prereleasecCst|jjS)N)r`r.r)rrrrr8%szVersion.is_postreleaseN)rrrrecompilerVERBOSE
IGNORECASErUr0r4r1r9r5r6rr7r8rrrrrs
#
cCsx|rZ|dkrd}|j}|dkr&d}n(|dkr4d}n|d
krBd	}n|dkrNd}|t|fS|rt|rtd}|t|fSdS)NrZalphaaZbetabr:rr<r>revrr)r:rr<)rgrh)rKrQ)ZletterZnumberrrrrX*s 
rXz[\._-]cCs$|dk	r tddtj|DSdS)zR
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    Ncss&|]}|js|jnt|VqdS)N)isdigitrKrQ)rRrHrrrrTRsz'_parse_local_version..)rO_local_version_seperatorsrD)rrrrrYLsrYcCsttttjddt|}|dkr@|dkr@|dk	r@t}n|dkrLt}|dkrZt}|dkrft}|dkrvt}ntdd|D}||||||fS)NcSs|dkS)Nrr)r\rrrr!`sz_cmpkey..css*|]"}t|tr|dfnt|fVqdS)r]N)r*rQr)rRrSrrrrTsz_cmpkey..)rOreversedlist	itertools	dropwhiler)r
rrrrrrrrrZWs&		
rZ)Z
__future__rrrcollectionsrmraZ_structuresr__all__
namedtuplerr
ValueErrorr
objectrr	rbrcrCrErIr/rrrXrjrYrZrrrrs.!
9k
PKtge[ckX-packaging/__pycache__/__init__.cpython-36.pycnu[3

vh@sTddlmZmZmZddlmZmZmZmZm	Z	m
Z
mZmZdddddd	d
dgZ
dS)
)absolute_importdivisionprint_function)
__author__
__copyright__	__email____license____summary__	__title____uri____version__rr
rr
rrr	rN)Z
__future__rrr	__about__rrrr	r
rrr
__all__rr/usr/lib/python3.6/__init__.pys(
PKtge[1EŜ,packaging/__pycache__/_compat.cpython-36.pycnu[3

vh\@sVddlmZmZmZddlZejddkZejddkZerDefZ	ne
fZ	ddZdS))absolute_importdivisionprint_functionNcs&Gfddd}tj|dfiS)z/
    Create a base class with a metaclass.
    cseZdZfddZdS)z!with_metaclass..metaclasscs||S)N)clsnameZ
this_basesd)basesmetar/usr/lib/python3.6/_compat.py__new__sz)with_metaclass..metaclass.__new__N)__name__
__module____qualname__rr)rrrr
	metaclasssrZtemporary_class)typer)rrrr)rrr
with_metaclasssr)Z
__future__rrrsysversion_infoZPY2ZPY3strZstring_typesZ
basestringrrrrr
sPKtge[ckX3packaging/__pycache__/__init__.cpython-36.opt-1.pycnu[3

vh@sTddlmZmZmZddlmZmZmZmZm	Z	m
Z
mZmZdddddd	d
dgZ
dS)
)absolute_importdivisionprint_function)
__author__
__copyright__	__email____license____summary__	__title____uri____version__rr
rr
rrr	rN)Z
__future__rrr	__about__rrrr	r
rrr
__all__rr/usr/lib/python3.6/__init__.pys(
PKtge[ʡd"d",packaging/__pycache__/markers.cpython-36.pycnu[3

vh8 	@s@ddlmZmZmZddlZddlZddlZddlZddlm	Z	m
Z
mZmZddlm
Z
mZmZmZddlmZddlmZddlmZmZd	d
ddd
gZGdd	d	eZGdd
d
eZGdddeZGdddeZGdddeZGdddeZ GdddeZ!ededBedBedBedBedBedBed Bed!Bed"Bed#Bed$Bed%Bed&Bed'Bed(Bed)Bed*BZ"d#d"ddddd+Z#e"j$d,d-ed.ed/Bed0Bed1Bed2Bed3Bed4Bed5BZ%e%ed6Bed7BZ&e&j$d8d-ed9ed:BZ'e'j$d;d-ed<ed=BZ(e"e'BZ)ee)e&e)Z*e*j$d>d-ed?j+Z,ed@j+Z-eZ.e*ee,e.e-BZ/e.e/e
e(e.>ee.eZ0dAdBZ1dSdDdEZ2dFd-dGd-ej3ej4ej5ej6ej7ej8dHZ9dIdJZ:eZ;dKdLZdQd
Z?GdRddeZ@dS)T)absolute_importdivisionprint_functionN)ParseExceptionParseResultsstringStart	stringEnd)
ZeroOrMoreGroupForwardQuotedString)Literal)string_types)	SpecifierInvalidSpecifier
InvalidMarkerUndefinedComparisonUndefinedEnvironmentNameMarkerdefault_environmentc@seZdZdZdS)rzE
    An invalid marker was found, users should refer to PEP 508.
    N)__name__
__module____qualname____doc__rr/usr/lib/python3.6/markers.pyrsc@seZdZdZdS)rzP
    An invalid operation was attempted on a value that doesn't support it.
    N)rrrrrrrrrsc@seZdZdZdS)rz\
    A name was attempted to be used that does not exist inside of the
    environment.
    N)rrrrrrrrr%sc@s,eZdZddZddZddZddZd	S)
NodecCs
||_dS)N)value)selfrrrr__init__.sz
Node.__init__cCs
t|jS)N)strr)rrrr__str__1szNode.__str__cCsdj|jjt|S)Nz<{0}({1!r})>)format	__class__rr!)rrrr__repr__4sz
Node.__repr__cCstdS)N)NotImplementedError)rrrr	serialize7szNode.serializeN)rrrr r"r%r'rrrrr,src@seZdZddZdS)VariablecCst|S)N)r!)rrrrr'=szVariable.serializeN)rrrr'rrrrr(;sr(c@seZdZddZdS)ValuecCs
dj|S)Nz"{0}")r#)rrrrr'CszValue.serializeN)rrrr'rrrrr)Asr)c@seZdZddZdS)OpcCst|S)N)r!)rrrrr'IszOp.serializeN)rrrr'rrrrr*Gsr*implementation_versionplatform_python_implementationimplementation_namepython_full_versionplatform_releaseplatform_versionplatform_machineplatform_systempython_versionsys_platformos_namezos.namezsys.platformzplatform.versionzplatform.machinezplatform.python_implementationpython_implementationZextra)zos.namezsys.platformzplatform.versionzplatform.machinezplatform.python_implementationr6cCsttj|d|dS)Nr)r(ALIASESget)sltrrrisr<z===z==z>=z<=z!=z~=>sz(_coerce_parse_result..)
isinstancer)resultsrrrrGs
rGTcCst|tttfstt|trHt|dkrHt|dttfrHt|dSt|trdd|D}|rndj|Sddj|dSn"t|trdjdd	|DS|SdS)
Nrrcss|]}t|ddVqdS)F)firstN)_format_marker)rHmrrr	sz!_format_marker.. rErFcSsg|]}|jqSr)r')rHrOrrrrJsz"_format_marker..)rKlistrDrAssertionErrorlenrNjoin)markerrMinnerrrrrNs


rNcCs||kS)Nr)lhsrhsrrrr<scCs||kS)Nr)rXrYrrrr<s)r?znot inr>z<=z==z!=z>=r=c
Cslytdj|j|g}Wntk
r.YnX|j|Stj|j}|dkrbtdj||||||S)Nz#Undefined {0!r} on {1!r} and {2!r}.)	rrUr'rcontains
_operatorsr8rr#)rXoprYspecZoperrrr_eval_ops
r_cCs&|j|t}|tkr"tdj||S)Nz/{0!r} does not exist in evaluation environment.)r8
_undefinedrr#)environmentnamerrrr_get_envs
rcc	Csgg}x|D]}t|tttfs$tt|trD|djt||qt|tr|\}}}t|trvt||j	}|j	}n|j	}t||j	}|djt
|||q|dkst|dkr|jgqWtdd|DS)	NrrBrCcss|]}t|VqdS)N)all)rHitemrrrrPsz$_evaluate_markers..rf)rBrC)rKrRrDrrSappend_evaluate_markersr(rcrr_any)	ZmarkersragroupsrVrXr]rYZ	lhs_valueZ	rhs_valuerrrrhs"




rhcCs2dj|}|j}|dkr.||dt|j7}|S)Nz{0.major}.{0.minor}.{0.micro}finalr)r#releaselevelr!serial)infoversionZkindrrrformat_full_versions

rpcCslttdr ttjj}tjj}nd}d}||tjtjtj	tj
tjtjtjtjddtjdS)Nimplementation0rZ)r-r+r5r1r/r2r0r.r,r3r4)
hasattrsysrprqrorbosplatformmachinereleasesystemr3r6)Ziverr-rrrrs 

c@s.eZdZddZddZddZd
dd	ZdS)rcCs`yttj||_WnFtk
rZ}z*dj|||j|jd}t|WYdd}~XnXdS)Nz+Invalid marker: {0!r}, parse error at {1!r})rGMARKERZparseString_markersrr#locr)rrVeZerr_strrrrr szMarker.__init__cCs
t|jS)N)rNr})rrrrr"szMarker.__str__cCsdjt|S)Nz)r#r!)rrrrr%szMarker.__repr__NcCs$t}|dk	r|j|t|j|S)a$Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        N)rupdaterhr})rraZcurrent_environmentrrrevaluate s	
zMarker.evaluate)N)rrrr r"r%rrrrrrs)T)AZ
__future__rrroperatorrvrwruZpkg_resources.extern.pyparsingrrrrr	r
rrr
LZ_compatrZ
specifiersrr__all__
ValueErrorrrrobjectrr(r)r*ZVARIABLEr7ZsetParseActionZVERSION_CMPZ	MARKER_OPZMARKER_VALUEZBOOLOPZ
MARKER_VARZMARKER_ITEMsuppressZLPARENZRPARENZMARKER_EXPRZMARKER_ATOMr|rGrNltleeqnegegtr\r_r`rcrhrprrrrrrsx
	6


PKtge[1EŜ2packaging/__pycache__/_compat.cpython-36.opt-1.pycnu[3

vh\@sVddlmZmZmZddlZejddkZejddkZerDefZ	ne
fZ	ddZdS))absolute_importdivisionprint_functionNcs&Gfddd}tj|dfiS)z/
    Create a base class with a metaclass.
    cseZdZfddZdS)z!with_metaclass..metaclasscs||S)N)clsnameZ
this_basesd)basesmetar/usr/lib/python3.6/_compat.py__new__sz)with_metaclass..metaclass.__new__N)__name__
__module____qualname__rr)rrrr
	metaclasssrZtemporary_class)typer)rrrr)rrr
with_metaclasssr)Z
__future__rrrsysversion_infoZPY2ZPY3strZstring_typesZ
basestringrrrrr
sPKtge[090packaging/__pycache__/utils.cpython-36.opt-1.pycnu[3

vh@s2ddlmZmZmZddlZejdZddZdS))absolute_importdivisionprint_functionNz[-_.]+cCstjd|jS)N-)_canonicalize_regexsublower)namer
/usr/lib/python3.6/utils.pycanonicalize_namesr)Z
__future__rrrrecompilerrr
r
r
rs
PKtge[09*packaging/__pycache__/utils.cpython-36.pycnu[3

vh@s2ddlmZmZmZddlZejdZddZdS))absolute_importdivisionprint_functionNz[-_.]+cCstjd|jS)N-)_canonicalize_regexsublower)namer
/usr/lib/python3.6/utils.pycanonicalize_namesr)Z
__future__rrrrecompilerrr
r
r
rs
PKtge[w4packaging/__pycache__/__about__.cpython-36.opt-1.pycnu[3

vh@sPddlmZmZmZdddddddd	gZd
ZdZdZd
ZdZ	dZ
dZde	ZdS))absolute_importdivisionprint_function	__title____summary____uri____version__
__author__	__email____license__
__copyright__Z	packagingz"Core utilities for Python packagesz!https://github.com/pypa/packagingz16.8z)Donald Stufft and individual contributorszdonald@stufft.ioz"BSD or Apache License, Version 2.0zCopyright 2014-2016 %sN)
Z
__future__rrr__all__rrrrr	r
rrrr/usr/lib/python3.6/__about__.pys

PKtge[4_)),packaging/__pycache__/version.cpython-36.pycnu[3

vh$-@sddlmZmZmZddlZddlZddlZddlmZddddd	gZ	ej
d
ddd
dddgZddZGddde
ZGdddeZGdddeZejdejZddddddZddZddZdZGd ddeZd!d"Zejd#Zd$d%Zd&d'ZdS)()absolute_importdivisionprint_functionN)InfinityparseVersion
LegacyVersionInvalidVersionVERSION_PATTERN_VersionepochreleasedevprepostlocalcCs&yt|Stk
r t|SXdS)z
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on if the given version is
    a valid PEP 440 version or a legacy version.
    N)rr
r	)versionr/usr/lib/python3.6/version.pyrsc@seZdZdZdS)r
zF
    An invalid version was found, users should refer to PEP 440.
    N)__name__
__module____qualname____doc__rrrrr
$sc@sLeZdZddZddZddZddZd	d
ZddZd
dZ	ddZ
dS)_BaseVersioncCs
t|jS)N)hash_key)selfrrr__hash__,sz_BaseVersion.__hash__cCs|j|ddS)NcSs||kS)Nr)sorrr0sz%_BaseVersion.__lt__..)_compare)rotherrrr__lt__/sz_BaseVersion.__lt__cCs|j|ddS)NcSs||kS)Nr)rr rrrr!3sz%_BaseVersion.__le__..)r")rr#rrr__le__2sz_BaseVersion.__le__cCs|j|ddS)NcSs||kS)Nr)rr rrrr!6sz%_BaseVersion.__eq__..)r")rr#rrr__eq__5sz_BaseVersion.__eq__cCs|j|ddS)NcSs||kS)Nr)rr rrrr!9sz%_BaseVersion.__ge__..)r")rr#rrr__ge__8sz_BaseVersion.__ge__cCs|j|ddS)NcSs||kS)Nr)rr rrrr!<sz%_BaseVersion.__gt__..)r")rr#rrr__gt__;sz_BaseVersion.__gt__cCs|j|ddS)NcSs||kS)Nr)rr rrrr!?sz%_BaseVersion.__ne__..)r")rr#rrr__ne__>sz_BaseVersion.__ne__cCst|tstS||j|jS)N)
isinstancerNotImplementedr)rr#methodrrrr"As
z_BaseVersion._compareN)rrrrr$r%r&r'r(r)r"rrrrr*src@s`eZdZddZddZddZeddZed	d
ZeddZ	ed
dZ
eddZdS)r	cCst||_t|j|_dS)N)str_version_legacy_cmpkeyr)rrrrr__init__Js
zLegacyVersion.__init__cCs|jS)N)r.)rrrr__str__NszLegacyVersion.__str__cCsdjtt|S)Nz)formatreprr-)rrrr__repr__QszLegacyVersion.__repr__cCs|jS)N)r.)rrrrpublicTszLegacyVersion.publiccCs|jS)N)r.)rrrrbase_versionXszLegacyVersion.base_versioncCsdS)Nr)rrrrr\szLegacyVersion.localcCsdS)NFr)rrrr
is_prerelease`szLegacyVersion.is_prereleasecCsdS)NFr)rrrris_postreleasedszLegacyVersion.is_postreleaseN)rrrr0r1r4propertyr5r6rr7r8rrrrr	Hsz(\d+ | [a-z]+ | \.| -)czfinal-@)rpreview-rcrccsbxVtj|D]H}tj||}|s|dkr,q|dddkrJ|jdVqd|VqWdVdS)N.r
0123456789*z*final)_legacy_version_component_resplit_legacy_version_replacement_mapgetzfill)rpartrrr_parse_version_partsrsrIcCsd}g}xlt|jD]\}|jdrh|dkrJx|rH|ddkrH|jq.Wx|rf|ddkrf|jqLW|j|qWt|}||fS)	NrrBz*finalz*final-Z00000000rJrJ)rIlower
startswithpopappendtuple)rr
partsrHrrrr/s
r/a
    v?
    (?:
        (?:(?P[0-9]+)!)?                           # epoch
        (?P[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P
                                          # pre-release
            [-_\.]?
            (?P(a|b|c|rc|alpha|beta|pre|preview))
            [-_\.]?
            (?P[0-9]+)?
        )?
        (?P                                         # post release
            (?:-(?P[0-9]+))
            |
            (?:
                [-_\.]?
                (?Ppost|rev|r)
                [-_\.]?
                (?P[0-9]+)?
            )
        )?
        (?P                                          # dev release
            [-_\.]?
            (?Pdev)
            [-_\.]?
            (?P[0-9]+)?
        )?
    )
    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
c@s|eZdZejdedejejBZddZ	ddZ
ddZed	d
Z
eddZed
dZeddZeddZdS)rz^\s*z\s*$c	Cs|jj|}|stdj|t|jdr8t|jdndtdd|jdjdDt	|jd|jd	t	|jd
|jdp|jdt	|jd
|jdt
|jdd|_t|jj
|jj|jj|jj|jj|jj|_dS)NzInvalid version: '{0}'r
rcss|]}t|VqdS)N)int).0irrr	sz#Version.__init__..rr?Zpre_lZpre_nZpost_lZpost_n1Zpost_n2Zdev_lZdev_nr)r
rrrrr)_regexsearchr
r2rgrouprQrOrD_parse_letter_version_parse_local_versionr._cmpkeyr
rrrrrr)rrmatchrrrr0s.

zVersion.__init__cCsdjtt|S)Nz)r2r3r-)rrrrr4szVersion.__repr__cCsg}|jjdkr$|jdj|jj|jdjdd|jjD|jjdk	rl|jdjdd|jjD|jjdk	r|jdj|jjd	|jjdk	r|jd
j|jjd	|jj	dk	r|jdjdjdd|jj	Ddj|S)
Nrz{0}!r?css|]}t|VqdS)N)r-)rRxrrrrTsz"Version.__str__..css|]}t|VqdS)N)r-)rRr\rrrrTsz.post{0}rz.dev{0}z+{0}css|]}t|VqdS)N)r-)rRr\rrrrTs)
r.r
rNr2joinrrrrr)rrPrrrr1s zVersion.__str__cCst|jdddS)N+rr)r-rD)rrrrr5
szVersion.publiccCsLg}|jjdkr$|jdj|jj|jdjdd|jjDdj|S)Nrz{0}!r?css|]}t|VqdS)N)r-)rRr\rrrrTsz'Version.base_version..r])r.r
rNr2r^r)rrPrrrr6s
zVersion.base_versioncCs$t|}d|kr |jdddSdS)Nr_r)r-rD)rZversion_stringrrrrsz
Version.localcCst|jjp|jjS)N)boolr.rr)rrrrr7!szVersion.is_prereleasecCst|jjS)N)r`r.r)rrrrr8%szVersion.is_postreleaseN)rrrrecompilerVERBOSE
IGNORECASErUr0r4r1r9r5r6rr7r8rrrrrs
#
cCsx|rZ|dkrd}|j}|dkr&d}n(|dkr4d}n|d
krBd	}n|dkrNd}|t|fS|rt|rtd}|t|fSdS)NrZalphaaZbetabr:rr<r>revrr)r:rr<)rgrh)rKrQ)ZletterZnumberrrrrX*s 
rXz[\._-]cCs$|dk	r tddtj|DSdS)zR
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    Ncss&|]}|js|jnt|VqdS)N)isdigitrKrQ)rRrHrrrrTRsz'_parse_local_version..)rO_local_version_seperatorsrD)rrrrrYLsrYcCsttttjddt|}|dkr@|dkr@|dk	r@t}n|dkrLt}|dkrZt}|dkrft}|dkrvt}ntdd|D}||||||fS)NcSs|dkS)Nrr)r\rrrr!`sz_cmpkey..css*|]"}t|tr|dfnt|fVqdS)r]N)r*rQr)rRrSrrrrTsz_cmpkey..)rOreversedlist	itertools	dropwhiler)r
rrrrrrrrrZWs&		
rZ)Z
__future__rrrcollectionsrmraZ_structuresr__all__
namedtuplerr
ValueErrorr
objectrr	rbrcrCrErIr/rrrXrjrYrZrrrrs.!
9k
PKtge[w.packaging/__pycache__/__about__.cpython-36.pycnu[3

vh@sPddlmZmZmZdddddddd	gZd
ZdZdZd
ZdZ	dZ
dZde	ZdS))absolute_importdivisionprint_function	__title____summary____uri____version__
__author__	__email____license__
__copyright__Z	packagingz"Core utilities for Python packagesz!https://github.com/pypa/packagingz16.8z)Donald Stufft and individual contributorszdonald@stufft.ioz"BSD or Apache License, Version 2.0zCopyright 2014-2016 %sN)
Z
__future__rrr__all__rrrrr	r
rrrr/usr/lib/python3.6/__about__.pys

PKtge[7Tm7packaging/__pycache__/requirements.cpython-36.opt-1.pycnu[3

vh@srddlmZmZmZddlZddlZddlmZmZm	Z	m
Z
ddlmZmZm
Z
mZmZddlmZddlmZddlmZmZdd	lmZmZmZGd
ddeZeejejZ edj!Z"ed
j!Z#edj!Z$edj!Z%edj!Z&edj!Z'edj!Z(edZ)e ee)e BZ*ee ee*Z+e+dZ,e+Z-eddZ.e(e.Z/e-ee&e-Z0e"e
e0e#dZ1eej2ej3ej4BZ5eej2ej3ej4BZ6e5e6AZ7ee7ee&e7ddddZ8e
e$e8e%e8BZ9e9j:dde	e9dZ;e;j:dde	edZej:d de'Ze/e
e=Z?e,e
e1e?e>BZ@ee@eZAGd!d"d"eBZCdS)#)absolute_importdivisionprint_functionN)stringStart	stringEndoriginalTextForParseException)
ZeroOrMoreWordOptionalRegexCombine)Literal)parse)MARKER_EXPRMarker)LegacySpecifier	SpecifierSpecifierSetc@seZdZdZdS)InvalidRequirementzJ
    An invalid requirement was found, users should refer to PEP 508.
    N)__name__
__module____qualname____doc__rr"/usr/lib/python3.6/requirements.pyrsr[](),;@z-_.namez[^ ]+urlextrasF)Z
joinStringZadjacent	_raw_speccCs
|jpdS)N)r')sltrrr6sr,	specifiercCs|dS)Nrr)r)r*r+rrrr,9smarkercCst||j|jS)N)rZ_original_startZ
_original_end)r)r*r+rrrr,=sc@s(eZdZdZddZddZddZdS)	RequirementzParse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    cCsytj|}Wn@tk
rN}z$tdj||j|jdWYdd}~XnX|j|_|jrtj|j}|j	ot|j
s|j	r|j
rtd|j|_nd|_t|jr|jj
ng|_t|j|_|jr|jnd|_dS)Nz+Invalid requirement, parse error at "{0!r}"zInvalid URL given)REQUIREMENTZparseStringrrformatlocr$r%urlparseschemeZnetlocsetr&ZasListrr-r.)selfZrequirement_stringZreqeZ
parsed_urlrrr__init__Xs"*
zRequirement.__init__cCsz|jg}|jr*|jdjdjt|j|jr@|jt|j|jrX|jdj|j|j	rp|jdj|j	dj|S)Nz[{0}]r!z@ {0}z; {0}r()
r$r&appendr2joinsortedr-strr%r.)r7partsrrr__str__mszRequirement.__str__cCsdjt|S)Nz)r2r=)r7rrr__repr__~szRequirement.__repr__N)rrrrr9r?r@rrrrr/Ksr/)DZ
__future__rrrstringreZpkg_resources.extern.pyparsingrrrrr	r
rrr
rLZ%pkg_resources.extern.six.moves.urllibrr4ZmarkersrrZ
specifiersrrr
ValueErrorrZ
ascii_lettersZdigitsZALPHANUMsuppressZLBRACKETZRBRACKETZLPARENZRPARENCOMMAZ	SEMICOLONATZPUNCTUATIONZIDENTIFIER_ENDZ
IDENTIFIERNAMEZEXTRAZURIZURLZEXTRAS_LISTZEXTRASZ
_regex_strVERBOSE
IGNORECASEZVERSION_PEP440ZVERSION_LEGACYZVERSION_ONEZVERSION_MANYZ
_VERSION_SPECZsetParseActionZVERSION_SPECZMARKER_SEPERATORZMARKERZVERSION_AND_MARKERZURL_AND_MARKERZNAMED_REQUIREMENTr1objectr/rrrrsZ
PKtge[{

0packaging/__pycache__/_structures.cpython-36.pycnu[3

vh@sDddlmZmZmZGdddeZeZGdddeZeZdS))absolute_importdivisionprint_functionc@sTeZdZddZddZddZddZd	d
ZddZd
dZ	ddZ
ddZdS)InfinitycCsdS)Nr)selfrr!/usr/lib/python3.6/_structures.py__repr__	szInfinity.__repr__cCstt|S)N)hashrepr)rrrr__hash__szInfinity.__hash__cCsdS)NFr)rotherrrr__lt__szInfinity.__lt__cCsdS)NFr)rr
rrr__le__szInfinity.__le__cCst||jS)N)
isinstance	__class__)rr
rrr__eq__szInfinity.__eq__cCst||jS)N)rr)rr
rrr__ne__szInfinity.__ne__cCsdS)NTr)rr
rrr__gt__szInfinity.__gt__cCsdS)NTr)rr
rrr__ge__szInfinity.__ge__cCstS)N)NegativeInfinity)rrrr__neg__!szInfinity.__neg__N)__name__
__module____qualname__r	rrrrrrrrrrrrrsrc@sTeZdZddZddZddZddZd	d
ZddZd
dZ	ddZ
ddZdS)rcCsdS)Nz	-Infinityr)rrrrr	)szNegativeInfinity.__repr__cCstt|S)N)r
r)rrrrr,szNegativeInfinity.__hash__cCsdS)NTr)rr
rrrr/szNegativeInfinity.__lt__cCsdS)NTr)rr
rrrr2szNegativeInfinity.__le__cCst||jS)N)rr)rr
rrrr5szNegativeInfinity.__eq__cCst||jS)N)rr)rr
rrrr8szNegativeInfinity.__ne__cCsdS)NFr)rr
rrrr;szNegativeInfinity.__gt__cCsdS)NFr)rr
rrrr>szNegativeInfinity.__ge__cCstS)N)r)rrrrrAszNegativeInfinity.__neg__N)rrrr	rrrrrrrrrrrrr'srN)Z
__future__rrrobjectrrrrrrsPKtge[7Tm1packaging/__pycache__/requirements.cpython-36.pycnu[3

vh@srddlmZmZmZddlZddlZddlmZmZm	Z	m
Z
ddlmZmZm
Z
mZmZddlmZddlmZddlmZmZdd	lmZmZmZGd
ddeZeejejZ edj!Z"ed
j!Z#edj!Z$edj!Z%edj!Z&edj!Z'edj!Z(edZ)e ee)e BZ*ee ee*Z+e+dZ,e+Z-eddZ.e(e.Z/e-ee&e-Z0e"e
e0e#dZ1eej2ej3ej4BZ5eej2ej3ej4BZ6e5e6AZ7ee7ee&e7ddddZ8e
e$e8e%e8BZ9e9j:dde	e9dZ;e;j:dde	edZej:d de'Ze/e
e=Z?e,e
e1e?e>BZ@ee@eZAGd!d"d"eBZCdS)#)absolute_importdivisionprint_functionN)stringStart	stringEndoriginalTextForParseException)
ZeroOrMoreWordOptionalRegexCombine)Literal)parse)MARKER_EXPRMarker)LegacySpecifier	SpecifierSpecifierSetc@seZdZdZdS)InvalidRequirementzJ
    An invalid requirement was found, users should refer to PEP 508.
    N)__name__
__module____qualname____doc__rr"/usr/lib/python3.6/requirements.pyrsr[](),;@z-_.namez[^ ]+urlextrasF)Z
joinStringZadjacent	_raw_speccCs
|jpdS)N)r')sltrrr6sr,	specifiercCs|dS)Nrr)r)r*r+rrrr,9smarkercCst||j|jS)N)rZ_original_startZ
_original_end)r)r*r+rrrr,=sc@s(eZdZdZddZddZddZdS)	RequirementzParse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    cCsytj|}Wn@tk
rN}z$tdj||j|jdWYdd}~XnX|j|_|jrtj|j}|j	ot|j
s|j	r|j
rtd|j|_nd|_t|jr|jj
ng|_t|j|_|jr|jnd|_dS)Nz+Invalid requirement, parse error at "{0!r}"zInvalid URL given)REQUIREMENTZparseStringrrformatlocr$r%urlparseschemeZnetlocsetr&ZasListrr-r.)selfZrequirement_stringZreqeZ
parsed_urlrrr__init__Xs"*
zRequirement.__init__cCsz|jg}|jr*|jdjdjt|j|jr@|jt|j|jrX|jdj|j|j	rp|jdj|j	dj|S)Nz[{0}]r!z@ {0}z; {0}r()
r$r&appendr2joinsortedr-strr%r.)r7partsrrr__str__mszRequirement.__str__cCsdjt|S)Nz)r2r=)r7rrr__repr__~szRequirement.__repr__N)rrrrr9r?r@rrrrr/Ksr/)DZ
__future__rrrstringreZpkg_resources.extern.pyparsingrrrrr	r
rrr
rLZ%pkg_resources.extern.six.moves.urllibrr4ZmarkersrrZ
specifiersrrr
ValueErrorrZ
ascii_lettersZdigitsZALPHANUMsuppressZLBRACKETZRBRACKETZLPARENZRPARENCOMMAZ	SEMICOLONATZPUNCTUATIONZIDENTIFIER_ENDZ
IDENTIFIERNAMEZEXTRAZURIZURLZEXTRAS_LISTZEXTRASZ
_regex_strVERBOSE
IGNORECASEZVERSION_PEP440ZVERSION_LEGACYZVERSION_ONEZVERSION_MANYZ
_VERSION_SPECZsetParseActionZVERSION_SPECZMARKER_SEPERATORZMARKERZVERSION_AND_MARKERZURL_AND_MARKERZNAMED_REQUIREMENTr1objectr/rrrrsZ
PKtge[tsMM5packaging/__pycache__/specifiers.cpython-36.opt-1.pycnu[3

vhym@sddlmZmZmZddlZddlZddlZddlZddlm	Z	m
Z
ddlmZm
Z
mZGdddeZGdd	d	e
ejeZGd
ddeZGdd
d
eZddZGdddeZejdZddZddZGdddeZdS))absolute_importdivisionprint_functionN)string_typeswith_metaclass)Version
LegacyVersionparsec@seZdZdZdS)InvalidSpecifierzH
    An invalid specifier was found, users should refer to PEP 440.
    N)__name__
__module____qualname____doc__rr /usr/lib/python3.6/specifiers.pyrsrc@seZdZejddZejddZejddZejddZej	d	d
Z
e
jdd
Z
ejdd
dZejdddZ
dS)
BaseSpecifiercCsdS)z
        Returns the str representation of this Specifier like object. This
        should be representative of the Specifier itself.
        Nr)selfrrr__str__szBaseSpecifier.__str__cCsdS)zF
        Returns a hash value for this Specifier like object.
        Nr)rrrr__hash__szBaseSpecifier.__hash__cCsdS)zq
        Returns a boolean representing whether or not the two Specifier like
        objects are equal.
        Nr)rotherrrr__eq__$szBaseSpecifier.__eq__cCsdS)zu
        Returns a boolean representing whether or not the two Specifier like
        objects are not equal.
        Nr)rrrrr__ne__+szBaseSpecifier.__ne__cCsdS)zg
        Returns whether or not pre-releases as a whole are allowed by this
        specifier.
        Nr)rrrrprereleases2szBaseSpecifier.prereleasescCsdS)zd
        Sets whether or not pre-releases as a whole are allowed by this
        specifier.
        Nr)rvaluerrrr9sNcCsdS)zR
        Determines if the given item is contained within this specifier.
        Nr)ritemrrrrcontains@szBaseSpecifier.containscCsdS)z
        Takes an iterable of items and filters them so that only items which
        are contained within this specifier are allowed in it.
        Nr)riterablerrrrfilterFszBaseSpecifier.filter)N)N)rr
rabcabstractmethodrrrrabstractpropertyrsetterrrrrrrrsrc@seZdZiZd ddZddZddZd	d
ZddZd
dZ	ddZ
ddZeddZ
eddZeddZejddZddZd!ddZd"ddZdS)#_IndividualSpecifierNcCsF|jj|}|stdj||jdj|jdjf|_||_dS)NzInvalid specifier: '{0}'operatorversion)_regexsearchrformatgroupstrip_spec_prereleases)rspecrmatchrrr__init__Rsz_IndividualSpecifier.__init__cCs0|jdk	rdj|jnd}dj|jjt||S)Nz, prereleases={0!r}r$z<{0}({1!r}{2})>)r-r)r	__class__rstr)rprerrr__repr___sz_IndividualSpecifier.__repr__cCsdj|jS)Nz{0}{1})r)r,)rrrrrlsz_IndividualSpecifier.__str__cCs
t|jS)N)hashr,)rrrrrosz_IndividualSpecifier.__hash__cCsLt|tr0y|j|}Wq@tk
r,tSXnt||js@tS|j|jkS)N)
isinstancerr1rNotImplementedr,)rrrrrrrs
z_IndividualSpecifier.__eq__cCsLt|tr0y|j|}Wq@tk
r,tSXnt||js@tS|j|jkS)N)r6rr1rr7r,)rrrrrr}s
z_IndividualSpecifier.__ne__cCst|dj|j|S)Nz_compare_{0})getattrr)
_operators)roprrr
_get_operatorsz"_IndividualSpecifier._get_operatorcCst|ttfst|}|S)N)r6r	rr
)rr&rrr_coerce_versionsz$_IndividualSpecifier._coerce_versioncCs
|jdS)Nr)r,)rrrrr%sz_IndividualSpecifier.operatorcCs
|jdS)Nr)r,)rrrrr&sz_IndividualSpecifier.versioncCs|jS)N)r-)rrrrrsz _IndividualSpecifier.prereleasescCs
||_dS)N)r-)rrrrrrscCs
|j|S)N)r)rrrrr__contains__sz!_IndividualSpecifier.__contains__cCs<|dkr|j}|j|}|jr(|r(dS|j|j||jS)NF)rr<
is_prereleaser;r%r&)rrrrrrrs
z_IndividualSpecifier.containsccsd}g}d|dk	r|ndi}xL|D]D}|j|}|j|f|r"|jr\|pL|jr\|j|q"d}|Vq"W|r|rx|D]
}|VqzWdS)NFrT)r<rr>rappend)rrrZyieldedfound_prereleaseskwr&parsed_versionrrrrs




z_IndividualSpecifier.filter)r$N)N)N)rr
rr9r0r4rrrrr;r<propertyr%r&rr"r=rrrrrrr#Ns 



r#c@sveZdZdZejdedejejBZdddddd	d
Z	ddZ
d
dZddZddZ
ddZddZddZdS)LegacySpecifiera
        (?P(==|!=|<=|>=|<|>))
        \s*
        (?P
            [^,;\s)]* # Since this is a "legacy" specifier, and the version
                      # string can be just about anything, we match everything
                      # except for whitespace, a semi-colon for marker support,
                      # a closing paren since versions can be enclosed in
                      # them, and a comma since it's a version separator.
        )
        z^\s*z\s*$equal	not_equalless_than_equalgreater_than_equal	less_thangreater_than)z==z!=z<=z>=<>cCst|tstt|}|S)N)r6r	r2)rr&rrrr<s
zLegacySpecifier._coerce_versioncCs||j|kS)N)r<)rprospectiver.rrr_compare_equalszLegacySpecifier._compare_equalcCs||j|kS)N)r<)rrMr.rrr_compare_not_equalsz"LegacySpecifier._compare_not_equalcCs||j|kS)N)r<)rrMr.rrr_compare_less_than_equalsz(LegacySpecifier._compare_less_than_equalcCs||j|kS)N)r<)rrMr.rrr_compare_greater_than_equalsz+LegacySpecifier._compare_greater_than_equalcCs||j|kS)N)r<)rrMr.rrr_compare_less_thansz"LegacySpecifier._compare_less_thancCs||j|kS)N)r<)rrMr.rrr_compare_greater_thansz%LegacySpecifier._compare_greater_thanN)rr
r
_regex_strrecompileVERBOSE
IGNORECASEr'r9r<rNrOrPrQrRrSrrrrrDs 
rDcstjfdd}|S)Ncst|tsdS|||S)NF)r6r)rrMr.)fnrrwrappeds
z)_require_version_compare..wrapped)	functoolswraps)rYrZr)rYr_require_version_compare
sr]c	@seZdZdZejdedejejBZdddddd	d
ddZ	e
d
dZe
ddZe
ddZ
e
ddZe
ddZe
ddZe
ddZddZeddZejddZd S)!	Specifiera
        (?P(~=|==|!=|<=|>=|<|>|===))
        (?P
            (?:
                # The identity operators allow for an escape hatch that will
                # do an exact string match of the version you wish to install.
                # This will not be parsed by PEP 440 and we cannot determine
                # any semantic meaning from it. This operator is discouraged
                # but included entirely as an escape hatch.
                (?<====)  # Only match for the identity operator
                \s*
                [^\s]*    # We just match everything, except for whitespace
                          # since we are only testing for strict identity.
            )
            |
            (?:
                # The (non)equality operators allow for wild card and local
                # versions to be specified so we have to define these two
                # operators separately to enable that.
                (?<===|!=)            # Only match for equals and not equals

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?

                # You cannot use a wild card and a dev or local version
                # together so group them with a | and make them optional.
                (?:
                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
                    |
                    \.\*  # Wild card syntax of .*
                )?
            )
            |
            (?:
                # The compatible operator requires at least two digits in the
                # release segment.
                (?<=~=)               # Only match for the compatible operator

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
            |
            (?:
                # All other operators only allow a sub set of what the
                # (non)equality operators do. Specifically they do not allow
                # local versions to be specified nor do they allow the prefix
                # matching wild cards.
                (?=rKrLz===cCsNdjttjddt|dd}|d7}|jd||oL|jd||S)	N.cSs|jdo|jdS)NZpostZdev)
startswith)xrrrsz/Specifier._compare_compatible..rz.*z>=z==)joinlist	itertools	takewhile_version_splitr;)rrMr.prefixrrr_compare_compatibles
zSpecifier._compare_compatiblecCsp|jdrPt|j}t|dd}tt|}|dt|}t||\}}nt|}|jsht|j}||kS)Nz.*)endswithrZpublicrhr2len_pad_versionlocal)rrMr.rrrrNs


zSpecifier._compare_equalcCs|j||S)N)rN)rrMr.rrrrOszSpecifier._compare_not_equalcCs|t|kS)N)r)rrMr.rrrrPsz"Specifier._compare_less_than_equalcCs|t|kS)N)r)rrMr.rrrrQsz%Specifier._compare_greater_than_equalcCs>t|}||ksdS|jr:|jr:t|jt|jkr:dSdS)NFT)rr>base_version)rrMr.rrrrRszSpecifier._compare_less_thancCs`t|}||ksdS|jr:|jr:t|jt|jkr:dS|jdk	r\t|jt|jkr\dSdS)NFT)rZis_postreleaserqrp)rrMr.rrrrSs
zSpecifier._compare_greater_thancCst|jt|jkS)N)r2lower)rrMr.rrr_compare_arbitraryszSpecifier._compare_arbitrarycCsR|jdk	r|jS|j\}}|d
krN|dkr@|jdr@|dd}t|jrNdSd	S)N==>=<=~====z.*rkTF)rtrurvrwrxrl)r-r,rmr
r>)rr%r&rrrrs


zSpecifier.prereleasescCs
||_dS)N)r-)rrrrrrsN)rr
rrTrUrVrWrXr'r9r]rjrNrOrPrQrRrSrsrCrr"rrrrr^s*^#r^z^([0-9]+)((?:a|b|c|rc)[0-9]+)$cCsDg}x:|jdD],}tj|}|r2|j|jq|j|qW|S)Nr_)split
_prefix_regexr(extendgroupsr?)r&resultrr/rrrrh's
rhc	Csgg}}|jttjdd||jttjdd||j|t|dd|j|t|dd|jddgtdt|dt|d|jddgtdt|dt|dttj|ttj|fS)NcSs|jS)N)isdigit)rarrrrb6sz_pad_version..cSs|jS)N)r~)rarrrrb7srr0)r?rerfrgrninsertmaxchain)leftrightZ
left_splitZright_splitrrrro2s
&&roc@seZdZdddZddZddZd	d
ZddZd
dZddZ	ddZ
ddZeddZ
e
jddZ
ddZdddZd ddZdS)!SpecifierSetr$NcCsrdd|jdD}t}xB|D]:}y|jt|Wq tk
rX|jt|Yq Xq Wt||_||_dS)NcSsg|]}|jr|jqSr)r+).0srrr
Rsz)SpecifierSet.__init__..,)	rysetaddr^rrD	frozenset_specsr-)rZ
specifiersrZparsed	specifierrrrr0Os

zSpecifierSet.__init__cCs*|jdk	rdj|jnd}djt||S)Nz, prereleases={0!r}r$z)r-r)rr2)rr3rrrr4dszSpecifierSet.__repr__cCsdjtdd|jDS)Nrcss|]}t|VqdS)N)r2)rrrrr	nsz'SpecifierSet.__str__..)rdsortedr)rrrrrmszSpecifierSet.__str__cCs
t|jS)N)r5r)rrrrrpszSpecifierSet.__hash__cCst|trt|}nt|ts"tSt}t|j|jB|_|jdkrX|jdk	rX|j|_n<|jdk	rv|jdkrv|j|_n|j|jkr|j|_ntd|S)NzFCannot combine SpecifierSets with True and False prerelease overrides.)r6rrr7rrr-
ValueError)rrrrrr__and__ss





zSpecifierSet.__and__cCsFt|trt|}n&t|tr,tt|}nt|ts:tS|j|jkS)N)r6rrr#r2r7r)rrrrrrs



zSpecifierSet.__eq__cCsFt|trt|}n&t|tr,tt|}nt|ts:tS|j|jkS)N)r6rrr#r2r7r)rrrrrrs



zSpecifierSet.__ne__cCs
t|jS)N)rnr)rrrr__len__szSpecifierSet.__len__cCs
t|jS)N)iterr)rrrr__iter__szSpecifierSet.__iter__cCs.|jdk	r|jS|jsdStdd|jDS)Ncss|]}|jVqdS)N)r)rrrrrrsz+SpecifierSet.prereleases..)r-rany)rrrrrs

zSpecifierSet.prereleasescCs
||_dS)N)r-)rrrrrrscCs
|j|S)N)r)rrrrrr=szSpecifierSet.__contains__csNtttfstdkr$|jr4jr4dStfdd|jDS)NFc3s|]}|jdVqdS))rN)r)rr)rrrrrsz(SpecifierSet.contains..)r6r	rr
rr>allr)rrrr)rrrrszSpecifierSet.containscCs|dkr|j}|jr:x |jD]}|j|t|d}qW|Sg}g}xZ|D]R}t|ttfsdt|}n|}t|trtqH|jr|r|s|j	|qH|j	|qHW|r|r|dkr|S|SdS)N)r)
rrrboolr6r	rr
r>r?)rrrr.Zfilteredr@rrBrrrrs*


zSpecifierSet.filter)r$N)N)N)rr
rr0r4rrrrrrrrCrr"r=rrrrrrrMs
	


r)Z
__future__rrrrr[rfrUZ_compatrrr&rr	r
rrABCMetaobjectrr#rDr]r^rVrzrhrorrrrrs&9	4	
PKtge[vpackaging/__init__.pynu[# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

from .__about__ import (
    __author__, __copyright__, __email__, __license__, __summary__, __title__,
    __uri__, __version__
)

__all__ = [
    "__title__", "__summary__", "__uri__", "__version__", "__author__",
    "__email__", "__license__", "__copyright__",
]
PKtge[x(pyparsing.pynu[# module pyparsing.py
#
# Copyright (c) 2003-2016  Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars

The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.

Here is a program to parse "Hello, World!" (or any greeting of the form 
C{", !"}), built up using L{Word}, L{Literal}, and L{And} elements 
(L{'+'} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::

    from pyparsing import Word, alphas

    # define grammar of a greeting
    greet = Word(alphas) + "," + Word(alphas) + "!"

    hello = "Hello, World!"
    print (hello, "->", greet.parseString(hello))

The program outputs the following::

    Hello, World! -> ['Hello', ',', 'World', '!']

The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.

The L{ParseResults} object returned from L{ParserElement.parseString} can be accessed as a nested list, a dictionary, or an
object with named attributes.

The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
 - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)
 - quoted strings
 - embedded comments
"""

__version__ = "2.1.10"
__versionTime__ = "07 Oct 2016 01:31 UTC"
__author__ = "Paul McGuire "

import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime

try:
    from _thread import RLock
except ImportError:
    from threading import RLock

try:
    from collections import OrderedDict as _OrderedDict
except ImportError:
    try:
        from ordereddict import OrderedDict as _OrderedDict
    except ImportError:
        _OrderedDict = None

#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )

__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
'CloseMatch', 'tokenMap', 'pyparsing_common',
]

# Snapshot the interpreter version as a (major, minor, micro) tuple; the
# module-wide PY_3 flag drives every Python 2/3 compatibility shim below.
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
    _MAX_INT = sys.maxsize
    # Alias the names removed in Python 3 so the rest of the module can use
    # them unconditionally.
    basestring = str
    unichr = chr
    _ustr = str

    # build list of single arg builtins, that can be used as parse actions
    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]

else:
    _MAX_INT = sys.maxint
    # On Python 2, make the lazy xrange available under the name range.
    range = xrange

    def _ustr(obj):
        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
           then < returns the unicode object | encodes it with the default encoding | ... >.
        """
        if isinstance(obj,unicode):
            return obj

        try:
            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
            # it won't break any existing code.
            return str(obj)

        except UnicodeEncodeError:
            # Else encode it: replace unencodable characters with XML character
            # references, then rewrite those as \uXXXX escapes.
            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
            xmlcharref = Regex('&#\d+;')
            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
            return xmlcharref.transformString(ret)

    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
    singleArgBuiltins = []
    import __builtin__
    for fname in "sum len sorted reversed list tuple set any all min max".split():
        try:
            singleArgBuiltins.append(getattr(__builtin__,fname))
        except AttributeError:
            continue

# The concrete type of a generator expression; used to recognize generator
# token lists handed to ParseResults.
_generatorType = type((y for y in range(1)))
 
def _xml_escape(data):
    """Escape &, <, >, ", ', etc. in a string of data."""

    # ampersand must be replaced first
    from_symbols = '&><"\''
    to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
    for from_,to_ in zip(from_symbols, to_symbols):
        data = data.replace(from_, to_)
    return data

class _Constants(object):
    pass

# Commonly-used character sets for building Word expressions.
alphas     = string.ascii_uppercase + string.ascii_lowercase
nums       = "0123456789"
hexnums    = nums + "ABCDEFabcdef"
alphanums  = alphas + nums
# Backslash character, spelled via chr() to avoid escaping headaches.
_bslash    = chr(92)
# All printable, non-whitespace characters.
printables = "".join(c for c in string.printable if c not in string.whitespace)

class ParseBaseException(Exception):
    """Base class for every parsing runtime exception raised by pyparsing."""
    # Performance tuning: these exceptions are constructed *very* often, so
    # the constructor stays as small and fast as possible.
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
        # With no explicit msg, the single string argument *is* the message.
        self.loc = loc
        self.msg = pstr if msg is None else msg
        self.pstr = "" if msg is None else pstr
        self.parserElement = elem
        self.args = (pstr, loc, msg)

    @classmethod
    def _from_exception(cls, pe):
        """
        Internal factory: clone another ParseException-family instance as an
        instance of *cls*, sidestepping __init__ signature conflicts among
        subclasses.
        """
        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)

    def __getattr__( self, aname ):
        """Computed attributes, resolved by name:
            - lineno - line number of the exception text
            - col / column - column number of the exception text
            - line - the line containing the exception text
        """
        if aname == "lineno":
            return lineno( self.loc, self.pstr )
        if aname in ("col", "column"):
            return col( self.loc, self.pstr )
        if aname == "line":
            return line( self.loc, self.pstr )
        raise AttributeError(aname)

    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % (
                self.msg, self.loc, self.lineno, self.column )

    def __repr__( self ):
        return _ustr(self)

    def markInputline( self, markerString = ">!<" ):
        """Return the line of input on which the exception occurred, with
           *markerString* spliced in at the exception's column.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            line_str = line_str[:line_column] + markerString + line_str[line_column:]
        return line_str.strip()

    def __dir__(self):
        return ["lineno", "col", "line"] + dir(type(self))

class ParseException(ParseBaseException):
    """
    Raised when a parse expression fails to match the input string.

    Supported attributes (resolved by name):
     - lineno - line number of the exception text
     - col - column number of the exception text
     - line - the line containing the exception text

    Example::
        try:
            Word(nums).setName("integer").parseString("ABC")
        except ParseException as pe:
            print(pe)
            print("column: {}".format(pe.col))

    prints::
       Expected integer (at char 0), (line:1, col:1)
        column: 1
    """
    pass

class ParseFatalException(ParseBaseException):
    """User-throwable exception raised when inconsistent parse content is
       found; halts all parsing immediately."""
    pass

class ParseSyntaxException(ParseFatalException):
    """Identical to L{ParseFatalException}, but raised internally when an
       L{ErrorStop} ('-' operator) signals that parsing must stop at once
       because an unbacktrackable syntax error was found."""
    pass

#~ class ReparseException(ParseBaseException):
    #~ """Experimental class - parse actions can raise this exception to cause
       #~ pyparsing to reparse the input string:
        #~ - with a modified input string, and/or
        #~ - with a modified start location
       #~ Set the values of the ReparseException in the constructor, and raise the
       #~ exception in a parse action to cause pyparsing to use the new string/location.
       #~ Setting the values as None causes no change to be made.
       #~ """
    #~ def __init_( self, newstring, restartLoc ):
        #~ self.newParseText = newstring
        #~ self.reparseLoc = restartLoc

class RecursiveGrammarException(Exception):
    """Raised by L{ParserElement.validate} when the grammar appears to be
       improperly recursive."""
    def __init__( self, parseElementList ):
        # chain of parse elements forming the recursive cycle
        self.parseElementTrace = parseElementList

    def __str__( self ):
        return "RecursiveGrammarException: %s" % self.parseElementTrace

class _ParseResultsWithOffset(object):
    def __init__(self,p1,p2):
        self.tup = (p1,p2)
    def __getitem__(self,i):
        return self.tup[i]
    def __repr__(self):
        return repr(self.tup[0])
    def setOffset(self,i):
        self.tup = (self.tup[0],i)

class ParseResults(object):
    """
    Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (C{len(results)})
       - by list index (C{results[0], results[1]}, etc.)
       - by attribute (C{results.} - see L{ParserElement.setResultsName})

    Example::
        integer = Word(nums)
        date_str = (integer.setResultsName("year") + '/' 
                        + integer.setResultsName("month") + '/' 
                        + integer.setResultsName("day"))
        # equivalent form:
        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

        # parseString returns a ParseResults object
        result = date_str.parseString("1999/12/31")

        def test(s, fn=repr):
            print("%s -> %s" % (s, fn(eval(s))))
        test("list(result)")
        test("result[0]")
        test("result['month']")
        test("result.day")
        test("'month' in result")
        test("'minutes' in result")
        test("result.dump()", str)
    prints::
        list(result) -> ['1999', '/', '12', '/', '31']
        result[0] -> '1999'
        result['month'] -> '12'
        result.day -> '31'
        'month' in result -> True
        'minutes' in result -> False
        result.dump() -> ['1999', '/', '12', '/', '31']
        - day: 31
        - month: 12
        - year: 1999
    """
    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            self.__asList = asList
            self.__modal = modal
            if toklist is None:
                toklist = []
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            elif isinstance(toklist, _generatorType):
                self.__toklist = list(toklist)
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()

        if name is not None and name:
            if not modal:
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError,IndexError):
                        self[name] = toklist

    def __getitem__( self, i ):
        if isinstance( i, (int,slice) ):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                return self.__tokdict[i][-1][0]
            else:
                return ParseResults([ v[0] for v in self.__tokdict[i] ])

    def __setitem__( self, k, v, isinstance=isinstance ):
        if isinstance(v,_ParseResultsWithOffset):
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,(int,slice)):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
            sub = v
        if isinstance(sub,ParseResults):
            sub.__parent = wkref(self)

    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]

            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
            for name,occurrences in self.__tokdict.items():
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]

    def __contains__( self, k ):
        return k in self.__tokdict

    def __len__( self ): return len( self.__toklist )
    def __bool__(self): return ( not not self.__toklist )
    __nonzero__ = __bool__
    def __iter__( self ): return iter( self.__toklist )
    def __reversed__( self ): return iter( self.__toklist[::-1] )
    def _iterkeys( self ):
        if hasattr(self.__tokdict, "iterkeys"):
            return self.__tokdict.iterkeys()
        else:
            return iter(self.__tokdict)

    def _itervalues( self ):
        return (self[k] for k in self._iterkeys())
            
    def _iteritems( self ):
        return ((k, self[k]) for k in self._iterkeys())

    if PY_3:
        keys = _iterkeys       
        """Returns an iterator of all named result keys (Python 3.x only)."""

        values = _itervalues
        """Returns an iterator of all named result values (Python 3.x only)."""

        items = _iteritems
        """Returns an iterator of all named result key-value tuples (Python 3.x only)."""

    else:
        iterkeys = _iterkeys
        """Returns an iterator of all named result keys (Python 2.x only)."""

        itervalues = _itervalues
        """Returns an iterator of all named result values (Python 2.x only)."""

        iteritems = _iteritems
        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""

        def keys( self ):
            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
            return list(self.iterkeys())

        def values( self ):
            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
            return list(self.itervalues())
                
        def items( self ):
            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
            return list(self.iteritems())

    def haskeys( self ):
        """Since keys() returns an iterator, this method is helpful in bypassing
           code that looks for the existence of any defined results names."""
        return bool(self.__tokdict)
        
    def pop( self, *args, **kwargs):
        """
        Removes and returns item at specified index (default=C{last}).
        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
        argument or an integer argument, it will use C{list} semantics
        and pop tokens from the list of parsed tokens. If passed a 
        non-integer argument (most likely a string), it will use C{dict}
        semantics and pop the corresponding value from any defined 
        results names. A second default return value argument is 
        supported, just as in C{dict.pop()}.

        Example::
            def remove_first(tokens):
                tokens.pop(0)
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']

            label = Word(alphas)
            patt = label("LABEL") + OneOrMore(Word(nums))
            print(patt.parseString("AAB 123 321").dump())

            # Use pop() in a parse action to remove named result (note that corresponding value is not
            # removed from list form of results)
            def remove_LABEL(tokens):
                tokens.pop("LABEL")
                return tokens
            patt.addParseAction(remove_LABEL)
            print(patt.parseString("AAB 123 321").dump())
        prints::
            ['AAB', '123', '321']
            - LABEL: AAB

            ['AAB', '123', '321']
        """
        if not args:
            args = [-1]
        for k,v in kwargs.items():
            if k == 'default':
                args = (args[0], v)
            else:
                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
        if (isinstance(args[0], int) or 
                        len(args) == 1 or 
                        args[0] in self):
            index = args[0]
            ret = self[index]
            del self[index]
            return ret
        else:
            defaultvalue = args[1]
            return defaultvalue

    def get(self, key, defaultValue=None):
        """
        Returns named result matching the given key, or if there is no
        such name, then returns the given C{defaultValue} or C{None} if no
        C{defaultValue} is specified.

        Similar to C{dict.get()}.
        
        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            result = date_str.parseString("1999/12/31")
            print(result.get("year")) # -> '1999'
            print(result.get("hour", "not specified")) # -> 'not specified'
            print(result.get("hour")) # -> None
        """
        if key in self:
            return self[key]
        else:
            return defaultValue

    def insert( self, index, insStr ):
        """
        Inserts new element at location index in the list of parsed tokens.
        
        Similar to C{list.insert()}.

        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']

            # use a parse action to insert the parse location in the front of the parsed results
            def insert_locn(locn, tokens):
                tokens.insert(0, locn)
            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
        """
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary
        for name,occurrences in self.__tokdict.items():
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))

    def append( self, item ):
        """
        Add single element to end of ParseResults list of elements.

        Similar to C{list.append()}; does not affect any results names.

        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            
            # use a parse action to compute the sum of the parsed integers, and add it to the end
            def append_sum(tokens):
                tokens.append(sum(map(int, tokens)))
            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
        """
        self.__toklist.append(item)

    def extend( self, itemseq ):
        """
        Append every element of C{itemseq} to this ParseResults; a ParseResults
        argument is merged (tokens and names) via C{+=}.

        Example::
            patt = OneOrMore(Word(alphas))
            
            # use a parse action to append the reverse of the matched strings, to make a palindrome
            def make_palindrome(tokens):
                tokens.extend(reversed([t[::-1] for t in tokens]))
                return ''.join(tokens)
            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
        """
        # merge full ParseResults so named results carry over
        if isinstance(itemseq, ParseResults):
            self += itemseq
            return
        self.__toklist.extend(itemseq)

    def clear( self ):
        """
        Remove all tokens and all results names.
        """
        self.__toklist[:] = []
        self.__tokdict.clear()

    def __getattr__( self, name ):
        """Fall back to results-name lookup for attribute access; a missing
        name yields C{''} rather than raising C{AttributeError}."""
        # NOTE: the original carried an unreachable if/else after this
        # try/except (both paths return) - dead code removed
        try:
            return self[name]
        except KeyError:
            return ""

    def __add__( self, other ):
        """Return a new ParseResults combining self and other (C{+} operator)."""
        result = self.copy()
        result += other
        return result

    def __iadd__( self, other ):
        """Merge C{other}'s tokens and named results into self (C{+=} operator)."""
        if other.__tokdict:
            # re-base other's name->position entries onto our token list;
            # negative (end-relative) positions collapse to the splice offset
            offset = len(self.__toklist)
            addoffset = lambda a: offset if a<0 else a+offset
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                                for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    # adopt nested results so parent-based name lookup works
                    v[0].__parent = wkref(self)
            
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        return self

    def __radd__(self, other):
        if isinstance(other,int) and other == 0:
            # useful for merging many ParseResults using sum() builtin
            return self.copy()
        else:
            # this may raise a TypeError - so be it
            return other + self
        
    def __repr__( self ):
        """Debug form: '(token list repr, token dict repr)'."""
        return "({0}, {1})".format(repr(self.__toklist), repr(self.__tokdict))

    def __str__( self ):
        """Render like a list; nested ParseResults use their own str() form,
        other tokens their repr()."""
        pieces = []
        for tok in self.__toklist:
            if isinstance(tok, ParseResults):
                pieces.append(_ustr(tok))
            else:
                pieces.append(repr(tok))
        return '[' + ', '.join(pieces) + ']'

    def _asStringList( self, sep='' ):
        """Flatten all tokens (recursing into nested ParseResults) into a list
        of strings, inserting C{sep} between items when given."""
        flat = []
        for tok in self.__toklist:
            if flat and sep:
                flat.append(sep)
            if isinstance(tok, ParseResults):
                flat.extend(tok._asStringList())
            else:
                flat.append(_ustr(tok))
        return flat

    def asList( self ):
        """
        Return the parse results as a nested plain list (nested ParseResults
        become nested lists).

        Example::
            patt = OneOrMore(Word(alphas))
            result = patt.parseString("sldkj lsdkj sldkj")
            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
            print(type(result), result) # ->  ['sldkj', 'lsdkj', 'sldkj']
            
            # Use asList() to create an actual list
            result_list = result.asList()
            print(type(result_list), result_list) # ->  ['sldkj', 'lsdkj', 'sldkj']
        """
        converted = []
        for res in self.__toklist:
            converted.append(res.asList() if isinstance(res, ParseResults) else res)
        return converted

    def asDict( self ):
        """
        Return the named parse results as a nested dictionary (keyed nested
        results become dicts, unkeyed ones become lists).

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            
            result = date_str.parseString('12/31/1999')
            result_dict = result.asDict()
            print(type(result_dict), repr(result_dict)) # ->  {'day': '1999', 'year': '12', 'month': '31'}

            # even though a ParseResults supports dict-like access, sometime you just need to have a dict
            import json
            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
        """
        # py2 has iteritems(); py3 items() is already lazy enough
        item_fn = self.items if PY_3 else self.iteritems

        def _convert(value):
            if not isinstance(value, ParseResults):
                return value
            if value.haskeys():
                return value.asDict()
            return [_convert(v) for v in value]

        return dict((key, _convert(val)) for key, val in item_fn())

    def copy( self ):
        """
        Return a shallow copy of this C{ParseResults} object (token list shared
        contents, independent name dictionary).
        """
        duplicate = ParseResults( self.__toklist )
        duplicate.__tokdict = self.__tokdict.copy()
        duplicate.__accumNames.update( self.__accumNames )
        duplicate.__parent = self.__parent
        duplicate.__name = self.__name
        return duplicate

    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """
        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
        """
        nl = "\n"
        out = []
        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
                                                            for v in vlist)
        nextLevelIndent = indent + "  "

        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""

        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name

        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"

        out += [ nl, indent, "<", selfTag, ">" ]

        for i,res in enumerate(self.__toklist):
            if isinstance(res,ParseResults):
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
                else:
                    out += [ res.asXML(None,
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                # emit the token wrapped in opening AND closing tags
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                                                xmlBodyText,
                                                "</", resTag, ">" ]

        # close the enclosing element
        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)

    def __lookup(self,sub):
        """Return the results name under which C{sub} is stored (identity
        comparison), or None if it has none."""
        for name, occurrences in self.__tokdict.items():
            if any(value is sub for value, _ in occurrences):
                return name
        return None

    def getName(self):
        r"""
        Returns the results name for this token expression. Useful when several 
        different expressions might match at a particular location.

        Example::
            integer = Word(nums)
            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
            house_number_expr = Suppress('#') + Word(nums, alphanums)
            user_data = (Group(house_number_expr)("house_number") 
                        | Group(ssn_expr)("ssn")
                        | Group(integer)("age"))
            user_info = OneOrMore(user_data)
            
            result = user_info.parseString("22 111-22-3333 #221B")
            for item in result:
                print(item.getName(), ':', item[0])
        prints::
            age : 22
            ssn : 111-22-3333
            house_number : 221B
        """
        # 1) an explicitly assigned name wins
        if self.__name:
            return self.__name
        # 2) otherwise ask the parent (a weakref) which name it stored us under
        elif self.__parent:
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        # 3) single token with a single name at position 0 or -1: use that name
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
            return next(iter(self.__tokdict.keys()))
        else:
            return None

    def dump(self, indent='', depth=0, full=True):
        """
        Diagnostic method for listing out the contents of a C{ParseResults}.
        Accepts an optional C{indent} argument so that this string can be embedded
        in a nested display of other data.

        C{depth} controls the per-level indentation of nested entries; pass
        C{full=False} to show only the flat token list.

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            
            result = date_str.parseString('12/31/1999')
            print(result.dump())
        prints::
            ['12', '/', '31', '/', '1999']
            - day: 1999
            - month: 31
            - year: 12
        """
        out = []
        NL = '\n'
        # first line: the flat list view
        out.append( indent+_ustr(self.asList()) )
        if full:
            if self.haskeys():
                # named results: one "- name: value" line each, sorted by name
                items = sorted((str(k), v) for k,v in self.items())
                for k,v in items:
                    if out:
                        out.append(NL)
                    out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
                    if isinstance(v,ParseResults):
                        if v:
                            out.append( v.dump(indent,depth+1) )
                        else:
                            out.append(_ustr(v))
                    else:
                        out.append(repr(v))
            elif any(isinstance(vv,ParseResults) for vv in self):
                # unnamed but nested results: show "[i]:" entries recursively
                v = self
                for i,vv in enumerate(v):
                    if isinstance(vv,ParseResults):
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))
                    else:
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))
            
        return "".join(out)

    def pprint(self, *args, **kwargs):
        """
        Pretty-printer for parsed results as a list, using the C{pprint} module.
        Accepts additional positional or keyword args as defined for the 
        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})

        Example::
            ident = Word(alphas, alphanums)
            num = Word(nums)
            func = Forward()
            term = ident | num | Group('(' + func + ')')
            func <<= ident + Group(Optional(delimitedList(term)))
            result = func.parseString("fna a,b,(fnb c,d,200),100")
            result.pprint(width=40)
        prints::
            ['fna',
             ['a',
              'b',
              ['(', 'fnb', ['c', 'd', '200'], ')'],
              '100']]
        """
        # delegate to stdlib pprint on the plain nested-list view
        pprint.pprint(self.asList(), *args, **kwargs)

    # add support for pickle protocol
    def __getstate__(self):
        """Return picklable state: (token list, (tokdict copy, dereferenced
        parent or None, accumulating names, results name))."""
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   # dereference the parent weakref; pickle a strong ref or None
                   self.__parent is not None and self.__parent() or None,
                   self.__accumNames,
                   self.__name ) )

    def __setstate__(self,state):
        """Restore state produced by C{__getstate__}."""
        self.__toklist = state[0]
        (self.__tokdict,
         par,
         inAccumNames,
         self.__name) = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        # the parent was pickled as a strong reference (or None);
        # re-wrap it in a weakref
        if par is not None:
            self.__parent = wkref(par)
        else:
            self.__parent = None

    def __getnewargs__(self):
        # NOTE(review): self.__asList and self.__modal are never assigned in
        # this chunk - confirm __new__/__init__ set them, otherwise pickling
        # would raise AttributeError here
        return self.__toklist, self.__name, self.__asList, self.__modal

    def __dir__(self):
        return (dir(type(self)) + list(self.keys()))

# register ParseResults as a MutableMapping; the ABCs moved to collections.abc
# (aliases in collections were removed in Python 3.10), so prefer that location
try:
    from collections.abc import MutableMapping as _MutableMapping
except ImportError:  # Python 2
    from collections import MutableMapping as _MutableMapping
_MutableMapping.register(ParseResults)

def col (loc,strg):
    """Returns current column within a string, counting newlines as line separators.
   The first column is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # the two definitions below were garbled together in this copy; restored:
    # a loc sitting just after a newline is column 1, otherwise measure the
    # distance back to the most recent newline
    s = strg
    return 1 if 0 < loc < len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)

def lineno(loc,strg):
    """Returns current line number within a string, counting newlines as line separators.
   The first line is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    return strg.count("\n",0,loc) + 1

def line( loc, strg ):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
       """
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    if end < 0:
        # loc is on the final line - take everything to the end of the string
        return strg[start:]
    return strg[start:end]

def _defaultStartDebugAction( instring, loc, expr ):
    """Default debug callback, printed before an expression match is attempted."""
    location = "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )
    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + location))

def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    """Default debug callback, printed after an expression matches."""
    message = "Matched " + _ustr(expr) + " -> " + str(toks.asList())
    print (message)

def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    """Default debug callback, printed when matching raises an exception."""
    message = "Exception raised:" + _ustr(exc)
    print (message)

def nullDebugAction(*args):
    """Debug action that does nothing - install to silence debugging output."""

# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
    #~ if func in singleArgBuiltins:
        #~ return lambda s,l,t: func(t)
    #~ limit = 0
    #~ foundArity = False
    #~ def wrapper(*args):
        #~ nonlocal limit,foundArity
        #~ while 1:
            #~ try:
                #~ ret = func(*args[limit:])
                #~ foundArity = True
                #~ return ret
            #~ except TypeError:
                #~ if limit == maxargs or foundArity:
                    #~ raise
                #~ limit += 1
                #~ continue
    #~ return wrapper

# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
    """Wrap C{func} so it can be called as C{fn(s,l,t)} regardless of how many
    of those leading arguments the target actually accepts; the true arity is
    discovered by trial and error on the first call."""
    if func in singleArgBuiltins:
        return lambda s,l,t: func(t)
    # single-item lists act as mutable cells (py2 has no 'nonlocal')
    limit = [0]
    foundArity = [False]
    
    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
    if system_version[:2] >= (3,5):
        def extract_stack(limit=0):
            # special handling for Python 3.5.0 - extra deep call stack by 1
            offset = -3 if system_version == (3,5,0) else -2
            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
            return [(frame_summary.filename, frame_summary.lineno)]
        def extract_tb(tb, limit=0):
            frames = traceback.extract_tb(tb, limit=limit)
            frame_summary = frames[-1]
            return [(frame_summary.filename, frame_summary.lineno)]
    else:
        extract_stack = traceback.extract_stack
        extract_tb = traceback.extract_tb
    
    # synthesize what would be returned by traceback.extract_stack at the call to 
    # user's parse action 'func', so that we don't incur call penalty at parse time
    
    LINE_DIFF = 6
    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND 
    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
    this_line = extract_stack(limit=2)[-1]
    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)

    def wrapper(*args):
        while 1:
            try:
                ret = func(*args[limit[0]:])
                foundArity[0] = True
                return ret
            except TypeError:
                # re-raise TypeErrors if they did not come from our arity testing
                if foundArity[0]:
                    raise
                else:
                    try:
                        tb = sys.exc_info()[-1]
                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
                            raise
                    finally:
                        del tb

                if limit[0] <= maxargs:
                    limit[0] += 1
                    continue
                raise

    # copy func name to wrapper for sensible debug output
    # (restored "<parse action>" fallback literal, corrupted to "" in this copy)
    func_name = "<parse action>"
    try:
        func_name = getattr(func, '__name__', 
                            getattr(func, '__class__').__name__)
    except Exception:
        func_name = str(func)
    wrapper.__name__ = func_name

    return wrapper

class ParserElement(object):
    """Abstract base level parser element class."""
    # default set of characters skipped as whitespace between tokens;
    # per-instance whiteChars is initialized from this in __init__
    DEFAULT_WHITE_CHARS = " \n\t\r"
    # NOTE(review): consulted elsewhere when reporting errors; presumably
    # enables full stack traces from parse exceptions - confirm in full file
    verbose_stacktrace = False

    @staticmethod
    def setDefaultWhitespaceChars( chars ):
        r"""
        Overrides the default whitespace chars

        Note: affects only elements created after this call; existing elements
        keep the whiteChars they were initialized with.

        Example::
            # default whitespace chars are space, <TAB> and newline
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
            
            # change to just treat newline as significant
            ParserElement.setDefaultWhitespaceChars(" \t")
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
        """
        ParserElement.DEFAULT_WHITE_CHARS = chars

    @staticmethod
    def inlineLiteralsUsing(cls):
        """
        Set class to be used for inclusion of string literals into a parser.
        
        Example::
            # default literal class used is Literal
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']


            # change to Suppress
            ParserElement.inlineLiteralsUsing(Suppress)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
        """
        # stored class is consulted wherever bare strings are promoted to
        # parser elements (promotion sites are not visible in this chunk)
        ParserElement._literalStringClass = cls

    def __init__( self, savelist=False ):
        """Initialize the state shared by all parser elements.

        C{savelist} is stored as C{self.saveAsList}; subclasses pass True when
        their matched tokens should be preserved as a list.
        """
        self.parseAction = list()
        self.failAction = None
        #~ self.name = ""  # don't define self.name, let subclasses try/except upcall
        # placeholder for a cached string representation
        self.strRepr = None
        self.resultsName = None
        self.saveAsList = savelist
        self.skipWhitespace = True
        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        # when True, copy() resets whiteChars to the current class default
        self.copyDefaultWhiteChars = True
        self.mayReturnEmpty = False # used when checking for left-recursion
        self.keepTabs = False
        self.ignoreExprs = list()
        self.debug = False
        self.streamlined = False
        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
        self.errmsg = ""
        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
        self.debugActions = ( None, None, None ) #custom debug actions
        self.re = None
        self.callPreparse = True # used to avoid redundant calls to preParse
        self.callDuringTry = False

    def copy( self ):
        """
        Make a copy of this C{ParserElement}.  Useful for defining different parse actions
        for the same parsing pattern, using copies of the original parse element.
        
        Example::
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
            
            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
        prints::
            [5120, 100, 655360, 268435456]
        Equivalent form of C{expr.copy()} is just C{expr()}::
            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
        """
        cpy = copy.copy( self )
        cpy.parseAction = self.parseAction[:]
        cpy.ignoreExprs = self.ignoreExprs[:]
        if self.copyDefaultWhiteChars:
            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        return cpy

    def setName( self, name ):
        """
        Define name for this expression, makes debugging and exception messages clearer.
        
        Example::
            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
        """
        self.name = name
        self.errmsg = "Expected " + self.name
        if hasattr(self,"exception"):
            self.exception.msg = self.errmsg
        return self

    def setResultsName( self, name, listAllMatches=False ):
        """
        Define name for referencing matching tokens as a nested attribute
        of the returned parse results.
        NOTE: this returns a *copy* of the original C{ParserElement} object;
        this is so that the client can define a basic element, such as an
        integer, and reference it in multiple places with different names.

        You can also set results names using the abbreviated syntax,
        C{expr("name")} in place of C{expr.setResultsName("name")} - 
        see L{I{__call__}<__call__>}.

        Example::
            date_str = (integer.setResultsName("year") + '/' 
                        + integer.setResultsName("month") + '/' 
                        + integer.setResultsName("day"))

            # equivalent form:
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
        """
        newself = self.copy()
        if name.endswith("*"):
            name = name[:-1]
            listAllMatches=True
        newself.resultsName = name
        newself.modalResults = not listAllMatches
        return newself

    def setBreak(self,breakFlag = True):
        """Invoke the Python pdb debugger just before this element is parsed.
           Pass C{breakFlag=False} to remove a previously installed breakpoint.
        """
        if not breakFlag:
            # uninstall: restore the original parse method if one was wrapped
            if hasattr(self._parse, "_originalParseMethod"):
                self._parse = self._parse._originalParseMethod
            return self
        _parseMethod = self._parse
        def breaker(instring, loc, doActions=True, callPreParse=True):
            import pdb
            pdb.set_trace()
            return _parseMethod( instring, loc, doActions, callPreParse )
        # remember the wrapped method so setBreak(False) can undo this
        breaker._originalParseMethod = _parseMethod
        self._parse = breaker
        return self

    def setParseAction( self, *fns, **kwargs ):
        """
        Replace this element's parse actions with C{fns}, run on a successful match.

        Each fn is a callable taking 0-3 arguments, invoked as C{fn(s,loc,toks)},
        C{fn(loc,toks)}, C{fn(toks)}, or C{fn()}, where:
         - s    = the original string being parsed
         - loc  = the location of the matching substring
         - toks = the matched tokens, as a C{L{ParseResults}} object
        A fn may return a replacement token list; returning C{None} keeps the
        matched tokens unchanged.

        Optional keyword arguments:
         - callDuringTry = (default=C{False}) also run the actions during lookaheads and alternate testing

        Note: the default parsing behavior expands tabs in the input string
        before parsing begins; bear that in mind when relating C{loc} back to
        the original string.

        Example::
            integer = Word(nums)
            date_str = integer + '/' + integer + '/' + integer

            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']

            # use parse action to convert to ints at parse time
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            date_str = integer + '/' + integer + '/' + integer

            # note that integer fields are now ints, not strings
            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
        """
        self.parseAction = [_trim_arity(fn) for fn in fns]
        self.callDuringTry = kwargs.get("callDuringTry", False)
        return self

    def addParseAction( self, *fns, **kwargs ):
        """
        Append C{fns} to this element's existing list of parse actions.
        See L{I{setParseAction}<setParseAction>} for the fn call signatures.
        """
        self.parseAction.extend(_trim_arity(fn) for fn in fns)
        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
        return self

    def addCondition(self, *fns, **kwargs):
        """Add a boolean predicate function to expression's list of parse actions. See 
        L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, 
        functions passed to C{addCondition} need to return boolean success/fail of the condition.

        Optional keyword arguments:
         - message = define a custom message to be used in the raised exception
         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
         
        Example::
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            year_int = integer.copy()
            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
            date_str = year_int + '/' + integer + '/' + integer

            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
        """
        msg = kwargs.get("message", "failed user-defined condition")
        exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
        for fn in fns:
            # BUGFIX: bind the trimmed fn via a default argument; a plain
            # closure over the loop variable would late-bind, making every
            # generated action test the *last* fn in fns.  Trimming here also
            # avoids re-running _trim_arity on every parse call.
            def pa(s, l, t, fn=_trim_arity(fn)):
                if not bool(fn(s, l, t)):
                    raise exc_type(s, l, msg)
            self.parseAction.append(pa)
        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
        return self

    def setFailAction( self, fn ):
        """Define action to perform if parsing fails at this expression.
           Fail acton fn is a callable function that takes the arguments
           C{fn(s,loc,expr,err)} where:
            - s = string being parsed
            - loc = location where expression match was attempted and failed
            - expr = the parse expression that failed
            - err = the exception thrown
           The function returns no value.  It may throw C{L{ParseFatalException}}
           if it is desired to stop parsing immediately.

           Returns self, to allow chained calls."""
        self.failAction = fn
        return self

    def _skipIgnorables( self, instring, loc ):
        """Advance C{loc} past any text matched by the expressions in
        C{self.ignoreExprs}, repeating until no further matches occur."""
        matchedAny = True
        while matchedAny:
            matchedAny = False
            for ignoreExpr in self.ignoreExprs:
                try:
                    # consume repeated matches until this expression fails
                    while 1:
                        loc, _ = ignoreExpr._parse( instring, loc )
                        matchedAny = True
                except ParseException:
                    pass
        return loc

    def preParse( self, instring, loc ):
        if self.ignoreExprs:
            loc = self._skipIgnorables( instring, loc )

        if self.skipWhitespace:
            wt = self.whiteChars
            instrlen = len(instring)
            while loc < instrlen and instring[loc] in wt:
                loc += 1

        return loc

    def parseImpl( self, instring, loc, doActions=True ):
        # default implementation: match nothing, leave location unchanged;
        # subclasses override this with their actual matching logic
        return loc, []

    def postParse( self, instring, loc, tokenlist ):
        # hook for subclasses to transform matched tokens; default is pass-through
        return tokenlist

    #~ @profile
    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
        """Core match driver: preParse, parseImpl, postParse, then parse actions.

        Returns C{(new_loc, ParseResults)}; raises C{ParseException} on failure.
        The debugging path duplicates the normal path so the common case pays
        no per-call cost for debug hooks.
        """
        debugging = ( self.debug ) #and doActions )

        if debugging or self.failAction:
            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
            if (self.debugActions[0] ):
                self.debugActions[0]( instring, loc, self )
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            try:
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    # ran off the end of the input - report as a normal parse failure
                    raise ParseException( instring, len(instring), self.errmsg, self )
            except ParseBaseException as err:
                #~ print ("Exception raised:", err)
                if self.debugActions[2]:
                    self.debugActions[2]( instring, tokensStart, self, err )
                if self.failAction:
                    self.failAction( instring, tokensStart, self, err )
                raise
        else:
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            # mayIndexError subclasses need the IndexError guard; others can
            # call parseImpl directly when safely inside the input
            if self.mayIndexError or loc >= len(instring):
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    raise ParseException( instring, len(instring), self.errmsg, self )
            else:
                loc,tokens = self.parseImpl( instring, preloc, doActions )

        tokens = self.postParse( instring, loc, tokens )

        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
        if self.parseAction and (doActions or self.callDuringTry):
            if debugging:
                try:
                    # each action may return replacement tokens; None keeps current
                    for fn in self.parseAction:
                        tokens = fn( instring, tokensStart, retTokens )
                        if tokens is not None:
                            retTokens = ParseResults( tokens,
                                                      self.resultsName,
                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                      modal=self.modalResults )
                except ParseBaseException as err:
                    #~ print "Exception raised in user parse action:", err
                    if (self.debugActions[2] ):
                        self.debugActions[2]( instring, tokensStart, self, err )
                    raise
            else:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )

        if debugging:
            #~ print ("Matched",self,"->",retTokens.asList())
            if (self.debugActions[1] ):
                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )

        return loc, retTokens

    def tryParse(self, instring, loc):
        """
        Attempt a match at C{loc} with parse actions disabled; return the
        ending location on success.  A C{ParseFatalException} is downgraded
        to an ordinary C{ParseException} so callers can treat it as a plain
        mismatch.
        """
        try:
            result = self._parse(instring, loc, doActions=False)
        except ParseFatalException:
            raise ParseException(instring, loc, self.errmsg, self)
        return result[0]
    
    def canParseNext(self, instring, loc):
        """
        Return True if this expression matches at C{loc} in C{instring},
        False otherwise (a raised C{ParseException} or C{IndexError} counts
        as "no match").  No parse actions are run.
        """
        try:
            self.tryParse(instring, loc)
            return True
        except (ParseException, IndexError):
            return False

    class _UnboundedCache(object):
        """
        Unlimited-size memo store used by packrat parsing when no cache size
        limit is requested.  Exposes C{get}/C{set}/C{clear} plus a unique
        C{not_in_cache} sentinel returned by C{get} on a miss; the backing
        dict lives in a closure so each instance owns its own storage.
        """
        def __init__(self):
            storage = {}
            self.not_in_cache = sentinel = object()
            # bind the accessors as instance methods over the closed-over dict
            self.get = types.MethodType(lambda inst, key: storage.get(key, sentinel), self)
            self.set = types.MethodType(lambda inst, key, value: storage.__setitem__(key, value), self)
            self.clear = types.MethodType(lambda inst: storage.clear(), self)

    # Bounded FIFO memo store for packrat parsing.  Two implementations are
    # defined, chosen once at class-creation time: an OrderedDict-based one
    # when OrderedDict is importable, and a dict+deque fallback otherwise.
    if _OrderedDict is not None:
        class _FifoCache(object):
            # OrderedDict variant: insertion order gives FIFO eviction for free.
            def __init__(self, size):
                # unique sentinel returned by get() on a cache miss
                self.not_in_cache = not_in_cache = object()

                cache = _OrderedDict()

                def get(self, key):
                    return cache.get(key, not_in_cache)

                def set(self, key, value):
                    cache[key] = value
                    # evict the oldest entry once the bound is exceeded
                    if len(cache) > size:
                        cache.popitem(False)

                def clear(self):
                    cache.clear()

                # bind accessors as instance methods over the closed-over cache
                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)

    else:
        class _FifoCache(object):
            # Fallback variant: a plain dict for storage plus a bounded deque
            # that remembers insertion order for FIFO eviction.
            def __init__(self, size):
                # unique sentinel returned by get() on a cache miss
                self.not_in_cache = not_in_cache = object()

                cache = {}
                key_fifo = collections.deque([], size)

                def get(self, key):
                    return cache.get(key, not_in_cache)

                def set(self, key, value):
                    cache[key] = value
                    # evict the oldest remembered key once the bound is exceeded
                    if len(cache) > size:
                        cache.pop(key_fifo.popleft(), None)
                    key_fifo.append(key)

                def clear(self):
                    cache.clear()
                    key_fifo.clear()

                # bind accessors as instance methods over the closed-over state
                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)

    # argument cache for optimizing repeated calls when backtracking through recursive expressions
    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
    packrat_cache_lock = RLock()  # guards cache reads/writes across threads in _parseCache
    packrat_cache_stats = [0, 0]  # [hits, misses] counters, indexed by HIT/MISS in _parseCache

    # this method gets repeatedly called during backtracking with the same arguments -
    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
        """
        Packrat-caching parse entry point, installed over C{_parse} by
        C{enablePackrat()}.  Memoizes both successful results and parse
        exceptions, keyed by (expression, input, location, flags).  Assumes
        C{packrat_cache} has been replaced with an object exposing
        C{get}/C{set}/C{not_in_cache} (done by enablePackrat).
        """
        HIT, MISS = 0, 1
        lookup = (self, instring, loc, callPreParse, doActions)
        with ParserElement.packrat_cache_lock:
            cache = ParserElement.packrat_cache
            value = cache.get(lookup)
            if value is cache.not_in_cache:
                ParserElement.packrat_cache_stats[MISS] += 1
                try:
                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
                except ParseBaseException as pe:
                    # cache a copy of the exception, without the traceback
                    cache.set(lookup, pe.__class__(*pe.args))
                    raise
                else:
                    # cache a copy of the tokens so later mutation by parse
                    # actions cannot corrupt the memoized value
                    cache.set(lookup, (value[0], value[1].copy()))
                    return value
            else:
                ParserElement.packrat_cache_stats[HIT] += 1
                # cached exceptions are re-raised; cached results are returned
                # as a fresh copy for the same mutation-safety reason as above
                if isinstance(value, Exception):
                    raise value
                return (value[0], value[1].copy())

    # default parse entry point; enablePackrat() rebinds this to _parseCache
    _parse = _parseNoCache

    @staticmethod
    def resetCache():
        """Empty the packrat cache and zero out its hit/miss statistics."""
        ParserElement.packrat_cache.clear()
        stats = ParserElement.packrat_cache_stats
        stats[:] = [0 for _ in stats]

    # True once enablePackrat() has installed the caching parse entry point
    _packratEnabled = False
    @staticmethod
    def enablePackrat(cache_size_limit=128):
        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
           Repeated parse attempts at the same string location (which happens
           often in many complex grammars) can immediately return a cached value,
           instead of re-executing parsing/validating code.  Memoizing is done of
           both valid results and parsing exceptions.
           
           Parameters:
            - cache_size_limit - (default=C{128}) - if an integer value is provided
              will limit the size of the packrat cache; if None is passed, then
              the cache size will be unbounded; if 0 is passed, the cache will
              be effectively disabled.
            
           This speedup may break existing programs that use parse actions that
           have side-effects.  For this reason, packrat parsing is disabled when
           you first import pyparsing.  To activate the packrat feature, your
           program must call the class method C{ParserElement.enablePackrat()}.  If
           your program uses C{psyco} to "compile as you go", you must call
           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
           Python will crash.  For best results, call C{enablePackrat()} immediately
           after importing pyparsing.
           
           Example::
               import pyparsing
               pyparsing.ParserElement.enablePackrat()
        """
        # idempotent: a second call never replaces an already-installed cache
        if not ParserElement._packratEnabled:
            ParserElement._packratEnabled = True
            if cache_size_limit is None:
                ParserElement.packrat_cache = ParserElement._UnboundedCache()
            else:
                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
            # swap the parse entry point so all expressions go through the cache
            ParserElement._parse = ParserElement._parseCache

    def parseString(self, instring, parseAll=False):
        """
        Execute the parse expression against C{instring} and return the
        matched tokens.  This is the main client entry point once the
        complete grammar has been built.

        Set C{parseAll} to True to require that the entire input string be
        consumed (equivalent to ending the grammar with C{L{StringEnd()}}).

        Note: unless C{L{parseWithTabs}} was called, the input is first run
        through C{expandtabs()} so that reported column numbers are
        consistent; parse actions that index into the string with C{loc}
        should either use the action's C{s} argument or pre-expand tabs
        themselves.

        Example::
            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
        """
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
        for ignoreExpr in self.ignoreExprs:
            ignoreExpr.streamline()
        if not self.keepTabs:
            instring = instring.expandtabs()
        try:
            loc, tokens = self._parse(instring, 0)
            if parseAll:
                loc = self.preParse(instring, loc)
                endCheck = Empty() + StringEnd()
                endCheck._parse(instring, loc)
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            # re-raise from here to trim pyparsing's internal frames from the traceback
            raise exc
        return tokens

    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
        """
        Scan the input string for expression matches.  Each match will return the
        matching tokens, start location, and end location.  May be called with optional
        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
        C{overlap} is specified, then overlapping matches will be reported.

        Note that the start and end locations are reported relative to the string
        being parsed.  See L{I{parseString}} for more information on parsing
        strings with embedded tabs.

        Example::
            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
            print(source)
            for tokens,start,end in Word(alphas).scanString(source):
                print(' '*start + '^'*(end-start))
                print(' '*start + tokens[0])
        
        prints::
        
            sldjf123lsdjjkf345sldkjf879lkjsfd987
            ^^^^^
            sldjf
                    ^^^^^^^
                    lsdjjkf
                              ^^^^^^
                              sldkjf
                                       ^^^^^^
                                       lkjsfd
        """
        if not self.streamlined:
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()

        if not self.keepTabs:
            instring = _ustr(instring).expandtabs()
        instrlen = len(instring)
        loc = 0
        # hoist bound-method lookups out of the scanning loop
        preparseFn = self.preParse
        parseFn = self._parse
        ParserElement.resetCache()
        matches = 0
        try:
            while loc <= instrlen and matches < maxMatches:
                try:
                    preloc = preparseFn( instring, loc )
                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
                except ParseException:
                    # no match here; advance one character past the preparse point
                    loc = preloc+1
                else:
                    if nextLoc > loc:
                        matches += 1
                        yield tokens, preloc, nextLoc
                        if overlap:
                            # NOTE(review): lowercase 'nextloc' is tested but
                            # camelCase 'nextLoc' is assigned below -- the two
                            # lookalike names appear intentional upstream, but
                            # confirm before relying on the overlap behavior
                            nextloc = preparseFn( instring, loc )
                            if nextloc > loc:
                                loc = nextLoc
                            else:
                                loc += 1
                        else:
                            loc = nextLoc
                    else:
                        # zero-width match; step forward to avoid an infinite loop
                        loc = preloc+1
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc

    def transformString(self, instring):
        """
        Extension to C{L{scanString}}: scan C{instring} for matches and build
        a new string in which each matched span is replaced by the tokens
        produced (typically modified by a parse action attached to this
        expression).  Returns the transformed string.

        Example::
            wd = Word(alphas)
            wd.setParseAction(lambda toks: toks[0].title())
            
            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
        Prints::
            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
        """
        fragments = []
        lastE = 0
        # force preservation of tabs so that locations reported by scanString
        # line up with indexes into the original string
        self.keepTabs = True
        try:
            for tokens, start, end in self.scanString(instring):
                fragments.append(instring[lastE:start])
                if tokens:
                    if isinstance(tokens, ParseResults):
                        fragments += tokens.asList()
                    elif isinstance(tokens, list):
                        fragments += tokens
                    else:
                        fragments.append(tokens)
                lastE = end
            fragments.append(instring[lastE:])
            fragments = [frag for frag in fragments if frag]
            return "".join(map(_ustr, _flatten(fragments)))
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            # re-raise from here to trim pyparsing's internal frames
            raise exc

    def searchString(self, instring, maxMatches=_MAX_INT):
        """
        Extension to C{L{scanString}} that returns just the matched token
        groups, collected into a single C{ParseResults}.  C{maxMatches}
        clips the search after 'n' matches.

        Example::
            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
            cap_word = Word(alphas.upper(), alphas.lower())
            
            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
        prints::
            ['More', 'Iron', 'Lead', 'Gold', 'I']
        """
        try:
            hits = [tokens for tokens, start, end in self.scanString(instring, maxMatches)]
            return ParseResults(hits)
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            # re-raise from here to trim pyparsing's internal frames
            raise exc

    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
        """
        Generator method to split a string using the given expression as a separator.
        May be called with optional C{maxsplit} argument, to limit the number of splits;
        and the optional C{includeSeparators} argument (default=C{False}), if the separating
        matching text should be included in the split results.
        
        Example::        
            punc = oneOf(list(".,;:/-!?"))
            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
        prints::
            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
        """
        # scanString's maxMatches already enforces the split limit; the
        # original kept an unused 'splits' counter, removed here
        last = 0
        for tokens, start, end in self.scanString(instring, maxMatches=maxsplit):
            # yield the text between the previous separator and this one
            yield instring[last:start]
            if includeSeparators:
                yield tokens[0]
            last = end
        # trailing piece after the final separator
        yield instring[last:]

    def __add__(self, other):
        """
        Implementation of + operator - returns C{L{And}} of self and other;
        plain strings are promoted to parser elements first.

        Example::
            greet = Word(alphas) + "," + Word(alphas) + "!"
            hello = "Hello, World!"
            print (hello, "->", greet.parseString(hello))
        Prints::
            Hello, World! -> ['Hello', ',', 'World', '!']
        """
        if isinstance(other, basestring):
            other = ParserElement._literalStringClass(other)
        if isinstance(other, ParserElement):
            return And([self, other])
        warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                SyntaxWarning, stacklevel=2)
        return None

    def __radd__(self, other):
        """
        Implementation of + operator when left operand is not a C{L{ParserElement}};
        promotes the left operand and delegates to its C{__add__}.
        """
        if isinstance(other, basestring):
            other = ParserElement._literalStringClass(other)
        if isinstance(other, ParserElement):
            return other + self
        warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                SyntaxWarning, stacklevel=2)
        return None

    def __sub__(self, other):
        """
        Implementation of - operator: returns C{L{And}} with an error-stop
        marker between self and other, so a mismatch after the stop raises a
        fatal (non-backtracking) error.
        """
        if isinstance(other, basestring):
            other = ParserElement._literalStringClass(other)
        if isinstance(other, ParserElement):
            return And([self, And._ErrorStop(), other])
        warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                SyntaxWarning, stacklevel=2)
        return None

    def __rsub__(self, other):
        """
        Implementation of - operator when left operand is not a C{L{ParserElement}};
        promotes the left operand and delegates to its C{__sub__}.
        """
        if isinstance(other, basestring):
            other = ParserElement._literalStringClass(other)
        if isinstance(other, ParserElement):
            return other - self
        warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                SyntaxWarning, stacklevel=2)
        return None

    def __mul__(self,other):
        """
        Implementation of * operator, allows use of C{expr * 3} in place of
        C{expr + expr + expr}.  Expressions may also me multiplied by a 2-integer
        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
        may also include C{None} as in:
         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
              to C{expr*n + L{ZeroOrMore}(expr)}
              (read as "at least n instances of C{expr}")
         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
              (read as "0 to n instances of C{expr}")
         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}

        Note that C{expr*(None,n)} does not raise an exception if
        more than n exprs exist in the input stream; that is,
        C{expr*(None,n)} does not enforce a maximum number of expr
        occurrences.  If this behavior is desired, then write
        C{expr*(None,n) + ~expr}
        """
        if isinstance(other,int):
            minElements, optElements = other,0
        elif isinstance(other,tuple):
            other = (other + (None, None))[:2]
            if other[0] is None:
                other = (0, other[1])
            if isinstance(other[0],int) and other[1] is None:
                if other[0] == 0:
                    return ZeroOrMore(self)
                if other[0] == 1:
                    return OneOrMore(self)
                else:
                    return self*other[0] + ZeroOrMore(self)
            elif isinstance(other[0],int) and isinstance(other[1],int):
                minElements, optElements = other
                optElements -= minElements
            else:
                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
        else:
            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))

        if minElements < 0:
            raise ValueError("cannot multiply ParserElement by negative value")
        if optElements < 0:
            raise ValueError("second tuple value must be greater or equal to first tuple value")
        if minElements == optElements == 0:
            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")

        if (optElements):
            def makeOptionalList(n):
                if n>1:
                    return Optional(self + makeOptionalList(n-1))
                else:
                    return Optional(self)
            if minElements:
                if minElements == 1:
                    ret = self + makeOptionalList(optElements)
                else:
                    ret = And([self]*minElements) + makeOptionalList(optElements)
            else:
                ret = makeOptionalList(optElements)
        else:
            if minElements == 1:
                ret = self
            else:
                ret = And([self]*minElements)
        return ret

    def __rmul__(self, other):
        # reflected *: n * expr means the same as expr * n, so delegate
        return self.__mul__(other)

    def __or__(self, other):
        """
        Implementation of | operator - returns C{L{MatchFirst}} of self and
        other; plain strings are promoted to parser elements first.
        """
        if isinstance(other, basestring):
            other = ParserElement._literalStringClass(other)
        if isinstance(other, ParserElement):
            return MatchFirst([self, other])
        warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                SyntaxWarning, stacklevel=2)
        return None

    def __ror__(self, other):
        """
        Implementation of | operator when left operand is not a C{L{ParserElement}};
        promotes the left operand and delegates to its C{__or__}.
        """
        if isinstance(other, basestring):
            other = ParserElement._literalStringClass(other)
        if isinstance(other, ParserElement):
            return other | self
        warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                SyntaxWarning, stacklevel=2)
        return None

    def __xor__(self, other):
        """
        Implementation of ^ operator - returns C{L{Or}} of self and other;
        plain strings are promoted to parser elements first.
        """
        if isinstance(other, basestring):
            other = ParserElement._literalStringClass(other)
        if isinstance(other, ParserElement):
            return Or([self, other])
        warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                SyntaxWarning, stacklevel=2)
        return None

    def __rxor__(self, other):
        """
        Implementation of ^ operator when left operand is not a C{L{ParserElement}};
        promotes the left operand and delegates to its C{__xor__}.
        """
        if isinstance(other, basestring):
            other = ParserElement._literalStringClass(other)
        if isinstance(other, ParserElement):
            return other ^ self
        warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                SyntaxWarning, stacklevel=2)
        return None

    def __and__(self, other):
        """
        Implementation of & operator - returns C{L{Each}} of self and other;
        plain strings are promoted to parser elements first.
        """
        if isinstance(other, basestring):
            other = ParserElement._literalStringClass(other)
        if isinstance(other, ParserElement):
            return Each([self, other])
        warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                SyntaxWarning, stacklevel=2)
        return None

    def __rand__(self, other):
        """
        Implementation of & operator when left operand is not a C{L{ParserElement}};
        promotes the left operand and delegates to its C{__and__}.
        """
        if isinstance(other, basestring):
            other = ParserElement._literalStringClass(other)
        if isinstance(other, ParserElement):
            return other & self
        warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                SyntaxWarning, stacklevel=2)
        return None

    def __invert__( self ):
        """
        Implementation of ~ operator - returns C{L{NotAny}}, a zero-width
        negative lookahead on this expression.
        """
        return NotAny( self )

    def __call__(self, name=None):
        """
        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
        
        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
        passed as C{True}.
           
        If C{name} is omitted, same as calling C{L{copy}}.

        Example::
            # these are equivalent
            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             
        """
        if name is not None:
            return self.setResultsName(name)
        else:
            return self.copy()

    def suppress( self ):
        """
        Suppresses the output of this C{ParserElement}; useful to keep punctuation from
        cluttering up returned output.  Returns a new C{Suppress} wrapper around self.
        """
        return Suppress( self )

    def leaveWhitespace( self ):
        """
        Disables the skipping of whitespace before matching the characters in the
        C{ParserElement}'s defined pattern.  This is normally only used internally by
        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
        Returns self to allow chaining.
        """
        self.skipWhitespace = False
        return self

    def setWhitespaceChars( self, chars ):
        """
        Overrides the default whitespace chars skipped before matching.
        Also re-enables whitespace skipping and marks this element as no
        longer inheriting the class default.  Returns self to allow chaining.
        """
        self.skipWhitespace = True
        self.whiteChars = chars
        self.copyDefaultWhiteChars = False
        return self

    def parseWithTabs( self ):
        """
        Overrides default behavior of expanding tabs to spaces before parsing
        the input string.  Must be called before C{parseString} when the input
        grammar contains elements that match tab characters.  Returns self to
        allow chaining.
        """
        self.keepTabs = True
        return self

    def ignore(self, other):
        """
        Define expression to be ignored (e.g., comments) while doing pattern
        matching; may be called repeatedly, to define multiple comment or other
        ignorable patterns.  Plain strings are wrapped in C{Suppress}.
        
        Example::
            patt = OneOrMore(Word(alphas))
            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
            
            patt.ignore(cStyleComment)
            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
        """
        if isinstance(other, basestring):
            other = Suppress(other)
        if isinstance(other, Suppress):
            # don't register the same Suppress expression twice
            if other not in self.ignoreExprs:
                self.ignoreExprs.append(other)
            return self
        self.ignoreExprs.append(Suppress(other.copy()))
        return self

    def setDebugActions( self, startAction, successAction, exceptionAction ):
        """
        Enable display of debugging messages while doing pattern matching.

        Parameters are callables invoked at match attempt, match success, and
        match failure respectively; pass a falsy value for any of them to fall
        back to the module's default debug action.  Returns self to allow
        chaining.
        """
        self.debugActions = (startAction or _defaultStartDebugAction,
                             successAction or _defaultSuccessDebugAction,
                             exceptionAction or _defaultExceptionDebugAction)
        self.debug = True
        return self

    def setDebug(self, flag=True):
        """
        Enable (C{flag}=True) or disable (C{flag}=False) display of debugging
        messages while doing pattern matching, using the module's default
        debug actions.  Custom actions can be installed with
        L{setDebugActions} instead.  Returns self to allow chaining.

        Example::
            wd = Word(alphas).setName("alphaword")
            integer = Word(nums).setName("numword")
            term = wd | integer
            
            # turn on debugging for wd
            wd.setDebug()

            OneOrMore(term).parseString("abc 123 xyz 890")
        
        prints::
            Match alphaword at loc 0(1,1)
            Matched alphaword -> ['abc']
            Match alphaword at loc 3(1,4)
            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
            Match alphaword at loc 7(1,8)
            Matched alphaword -> ['xyz']
            Match alphaword at loc 11(1,12)
            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
            Match alphaword at loc 15(1,16)
            Exception raised:Expected alphaword (at char 15), (line:1, col:16)

        Note the use of L{setName} to assign a human-readable name to the
        expression, which makes the debug and exception messages easier to
        understand - the default name generated for a C{Word} expression
        without C{setName} looks like C{"W:(ABCD...)"}.
        """
        if flag:
            # setDebugActions also sets self.debug = True and returns self
            return self.setDebugActions(_defaultStartDebugAction,
                                        _defaultSuccessDebugAction,
                                        _defaultExceptionDebugAction)
        self.debug = False
        return self

    def __str__( self ):
        # display name of this expression (presumably assigned via setName /
        # name-generation logic defined elsewhere in the file)
        return self.name

    def __repr__( self ):
        # repr delegates to the same string form as __str__, via _ustr
        return _ustr(self)

    def streamline( self ):
        # base implementation just marks this element as streamlined and
        # invalidates any cached string representation; subclasses add real
        # optimization.  Returns self to allow chaining.
        self.streamlined = True
        self.strRepr = None
        return self

    def checkRecursion( self, parseElementList ):
        # no-op in the base class; container subclasses override this to
        # detect infinitely recursive grammar definitions
        pass

    def validate( self, validateTrace=[] ):
        """
        Check defined expressions for valid structure, check for infinite recursive definitions.
        """
        # NOTE(review): validateTrace is unused in this base implementation
        # (the mutable default is therefore harmless here); subclass overrides
        # presumably thread it through -- confirm against the rest of the file
        self.checkRecursion( [] )

    def parseFile(self, file_or_filename, parseAll=False):
        """
        Execute the parse expression on the given file or filename.
        If a filename is specified (instead of a file object),
        the entire file is opened, read, and closed before parsing.
        """
        try:
            contents = file_or_filename.read()
        except AttributeError:
            # not file-like; treat it as a path
            with open(file_or_filename, "r") as src:
                contents = src.read()
        try:
            return self.parseString(contents, parseAll)
        except ParseBaseException as err:
            if ParserElement.verbose_stacktrace:
                raise
            # re-raise from here to trim pyparsing's internal frames
            raise err

    def __eq__(self,other):
        # two ParserElements are equal if identical or structurally identical;
        # comparing against a string tests whether this expression matches it
        if isinstance(other, ParserElement):
            return self is other or vars(self) == vars(other)
        elif isinstance(other, basestring):
            return self.matches(other)
        else:
            # NOTE(review): this compares the super() proxy object itself to
            # 'other' (not a super().__eq__ call), which looks like it can
            # only ever be False -- confirm upstream intent before changing
            return super(ParserElement,self)==other

    def __ne__(self,other):
        # inequality is defined strictly as the negation of __eq__
        return not (self == other)

    def __hash__(self):
        # identity-based hash: equality above is structural, so equal-but-distinct
        # elements may hash differently; hashing is by object identity only
        return hash(id(self))

    def __req__(self,other):
        # reflected equality helper; delegates to __eq__
        return self == other

    def __rne__(self,other):
        # reflected inequality helper; negation of __eq__
        return not (self == other)

    def matches(self, testString, parseAll=True):
        """
        Quick boolean test of this parser against C{testString}: True if it
        parses, False on any parse failure.  Handy for inline microtests of
        sub-expressions while building up a larger parser.

        Parameters:
         - testString - to test against this expression for a match
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests

        Example::
            expr = Word(nums)
            assert expr.matches("100")
        """
        try:
            self.parseString(_ustr(testString), parseAll=parseAll)
        except ParseBaseException:
            return False
        return True
                
    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
        """
        Execute the parse expression on a series of test strings, showing each
        test, the parsed results or where the parse failed. Quick and easy way to
        run a parse expression against a list of sample strings.
           
        Parameters:
         - tests - a list of separate test strings, or a multiline string of test strings
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests           
         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test 
              string; pass None to disable comment filtering
         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
              if False, only dump nested list
         - printResults - (default=C{True}) prints test output to stdout
         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing

        Returns: a (success, results) tuple, where success indicates that all tests succeeded
        (or failed if C{failureTests} is True), and the results contain a list of lines of each 
        test's output
        
        Example::
            number_expr = pyparsing_common.number.copy()

            result = number_expr.runTests('''
                # unsigned integer
                100
                # negative integer
                -100
                # float with scientific notation
                6.02e23
                # integer with scientific notation
                1e-12
                ''')
            print("Success" if result[0] else "Failed!")

            result = number_expr.runTests('''
                # stray character
                100Z
                # missing leading digit before '.'
                -.100
                # too many '.'
                3.14.159
                ''', failureTests=True)
            print("Success" if result[0] else "Failed!")
        prints::
            # unsigned integer
            100
            [100]

            # negative integer
            -100
            [-100]

            # float with scientific notation
            6.02e23
            [6.02e+23]

            # integer with scientific notation
            1e-12
            [1e-12]

            Success
            
            # stray character
            100Z
               ^
            FAIL: Expected end of text (at char 3), (line:1, col:4)

            # missing leading digit before '.'
            -.100
            ^
            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)

            # too many '.'
            3.14.159
                ^
            FAIL: Expected end of text (at char 4), (line:1, col:5)

            Success

        Each test string must be on a single line. If you want to test a string that spans multiple
        lines, create a test like this::

            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
        
        (Note that this is a raw string literal, you must include the leading 'r'.)
        """
        # a multiline string of tests is split into one stripped line per test
        if isinstance(tests, basestring):
            tests = list(map(str.strip, tests.rstrip().splitlines()))
        if isinstance(comment, basestring):
            comment = Literal(comment)
        allResults = []
        comments = []
        success = True
        for t in tests:
            # accumulate comment lines (and blank lines following comments) so
            # they can be echoed just above the next real test string
            if comment is not None and comment.matches(t, False) or comments and not t:
                comments.append(t)
                continue
            if not t:
                continue
            out = ['\n'.join(comments), t]
            comments = []
            try:
                # expand literal '\n' sequences so multiline inputs can be
                # written as single-line test strings
                t = t.replace(r'\n','\n')
                result = self.parseString(t, parseAll=parseAll)
                out.append(result.dump(full=fullDump))
                # a parse success counts as overall success only when failures
                # were not expected
                success = success and not failureTests
            except ParseBaseException as pe:
                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
                if '\n' in t:
                    # multiline input: show the failing line with a caret marker
                    out.append(line(pe.loc, t))
                    out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
                else:
                    out.append(' '*pe.loc + '^' + fatal)
                out.append("FAIL: " + str(pe))
                # a parse failure counts as success only when failures expected
                success = success and failureTests
                result = pe
            except Exception as exc:
                out.append("FAIL-EXCEPTION: " + str(exc))
                success = success and failureTests
                result = exc

            if printResults:
                if fullDump:
                    out.append('')
                print('\n'.join(out))

            allResults.append((t, result))
        
        return success, allResults

        
class Token(ParserElement):
    """
    Abstract C{ParserElement} subclass, for defining atomic matching patterns.
    """
    def __init__( self ):
        # tokens are leaf expressions with no sub-expressions, so they do not
        # accumulate a list of sub-results (savelist=False)
        super(Token,self).__init__( savelist=False )


class Empty(Token):
    """
    A null token; always matches, consuming no input.
    """
    def __init__( self ):
        super(Empty,self).__init__()
        self.name = "Empty"
        self.mayIndexError = False
        self.mayReturnEmpty = True


class NoMatch(Token):
    """
    A token that can never match, at any position.
    """
    def __init__( self ):
        super(NoMatch,self).__init__()
        self.name = "NoMatch"
        self.errmsg = "Unmatchable token"
        self.mayReturnEmpty = True
        self.mayIndexError = False

    def parseImpl( self, instring, loc, doActions=True ):
        # unconditionally fail
        raise ParseException(instring, loc, self.errmsg, self)


class Literal(Token):
    """
    Token to exactly match a specified string.
    
    Example::
        Literal('blah').parseString('blah')  # -> ['blah']
        Literal('blah').parseString('blahfooblah')  # -> ['blah']
        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
    
    For case-insensitive matching, use L{CaselessLiteral}.
    
    For keyword matching (force word break before and after the matched string),
    use L{Keyword} or L{CaselessKeyword}.
    """
    def __init__( self, matchString ):
        super(Literal,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        if self.matchLen > 0:
            self.firstMatchChar = matchString[0]
        else:
            # an empty literal can never match anything; degrade to Empty
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False

    # Performance tuning: this routine gets called a *lot*
    def parseImpl( self, instring, loc, doActions=True ):
        # hot path: test the first character before the costlier startswith;
        # for single-character literals that first test is the whole match
        if instring[loc] == self.firstMatchChar:
            if self.matchLen == 1 or instring.startswith(self.match, loc):
                return loc + self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
# Short alias for Literal, and register Literal as the class used when plain
# strings are implicitly converted to parser elements by ParserElement.
_L = Literal
ParserElement._literalStringClass = Literal

class Keyword(Token):
    """
    Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
    Accepts two optional constructor arguments in addition to the keyword string:
     - C{identChars} is a string of characters that would be valid identifier characters,
          defaulting to all alphanumerics + "_" and "$"
     - C{caseless} allows case-insensitive matching, default is C{False}.
       
    Example::
        Keyword("start").parseString("start")  # -> ['start']
        Keyword("start").parseString("starting")  # -> Exception

    For case-insensitive matching, use L{CaselessKeyword}.
    """
    # characters considered part of an identifier, used to detect word breaks
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"

    def __init__( self, matchString, identChars=None, caseless=False ):
        super(Keyword,self).__init__()
        if identChars is None:
            identChars = Keyword.DEFAULT_KEYWORD_CHARS
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # caseless comparisons are done against upper-cased text
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = set(identChars)

    def parseImpl( self, instring, loc, doActions=True ):
        # a keyword matches only when the text matches AND neither the
        # preceding nor the following character is an identifier character
        if self.caseless:
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)

    def copy(self):
        # copies revert to the class-wide default identifier character set
        c = super(Keyword,self).copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c

    @staticmethod
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars

class CaselessLiteral(Literal):
    """
    Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.

    Example::
        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
        
    (Contrast with example for L{CaselessKeyword}.)
    """
    def __init__( self, matchString ):
        # store and compare the upper-cased form internally
        super(CaselessLiteral,self).__init__( matchString.upper() )
        # Preserve the defining literal.
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name

    def parseImpl( self, instring, loc, doActions=True ):
        candidate = instring[loc:loc + self.matchLen].upper()
        if candidate == self.match:
            # return the original-case defining string, not the input text
            return loc + self.matchLen, self.returnString
        raise ParseException(instring, loc, self.errmsg, self)

class CaselessKeyword(Keyword):
    """
    Caseless version of L{Keyword}.

    Example::
        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
        
    (Contrast with example for L{CaselessLiteral}.)
    """
    def __init__( self, matchString, identChars=None ):
        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )

    def parseImpl( self, instring, loc, doActions=True ):
        # match the keyword text caselessly, and require a word break on BOTH
        # sides; the original implementation omitted the leading-character
        # check that Keyword.parseImpl performs, so e.g. "xCMD" would match
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
             (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)

class CloseMatch(Token):
    """
    A variation on L{Literal} which matches "close" matches, that is, 
    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
     - C{match_string} - string to be matched
     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
    
    The results from a successful parse will contain the matched text from the input string and the following named results:
     - C{mismatches} - a list of the positions within the match_string where mismatches were found
     - C{original} - the original match_string used to compare against the input string
    
    If C{mismatches} is an empty list, then the match was an exact match.
    
    Example::
        patt = CloseMatch("ATCATCGAATGGA")
        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)

        # exact match
        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})

        # close match allowing up to 2 mismatches
        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
    """
    def __init__(self, match_string, maxMismatches=1):
        super(CloseMatch,self).__init__()
        self.name = match_string
        self.match_string = match_string
        self.maxMismatches = maxMismatches
        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
        self.mayIndexError = False
        self.mayReturnEmpty = False

    def parseImpl( self, instring, loc, doActions=True ):
        start = loc
        instrlen = len(instring)
        maxloc = start + len(self.match_string)

        if maxloc <= instrlen:
            match_string = self.match_string
            match_stringloc = 0
            mismatches = []
            maxMismatches = self.maxMismatches

            # compare character-by-character, recording mismatch positions;
            # bail out as soon as the mismatch budget is exceeded
            for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
                src,mat = s_m
                if src != mat:
                    mismatches.append(match_stringloc)
                    if len(mismatches) > maxMismatches:
                        break
            else:
                # BUGFIX: the end location must be offset from the match start;
                # the original "loc = match_stringloc + 1" returned a location
                # relative to 0 and sliced the wrong text when start > 0
                loc = start + match_stringloc + 1
                results = ParseResults([instring[start:loc]])
                results['original'] = self.match_string
                results['mismatches'] = mismatches
                return loc, results

        raise ParseException(instring, loc, self.errmsg, self)


class Word(Token):
    """
    Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction. An optional
    C{excludeChars} parameter can list characters that might be found in 
    the input C{bodyChars} string; useful to define a word of all printables
    except for one or two characters, for instance.
    
    L{srange} is useful for defining custom character set strings for defining 
    C{Word} expressions, using range notation from regular expression character sets.
    
    A common mistake is to use C{Word} to match a specific literal string, as in 
    C{Word("Address")}. Remember that C{Word} uses the string argument to define
    I{sets} of matchable characters. This expression would match "Add", "AAA",
    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
    To match an exact literal string, use L{Literal} or L{Keyword}.

    pyparsing includes helper strings for building Words:
     - L{alphas}
     - L{nums}
     - L{alphanums}
     - L{hexnums}
     - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
     - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
     - L{printables} (any non-whitespace character)

    Example::
        # a word composed of digits
        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
        
        # a word with a leading capital, and zero or more lowercase
        capital_word = Word(alphas.upper(), alphas.lower())

        # hostnames are alphanumeric, with leading alpha, and '-'
        hostname = Word(alphas, alphanums+'-')
        
        # roman numeral (not a strict parser, accepts invalid mix of characters)
        roman = Word("IVXLCDM")
        
        # any string of non-whitespace characters, except for ','
        csv_value = Word(printables, excludeChars=",")
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
        super(Word,self).__init__()
        if excludeChars:
            initChars = ''.join(c for c in initChars if c not in excludeChars)
            if bodyChars:
                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
        self.initCharsOrig = initChars
        self.initChars = set(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = set(bodyChars)
        else:
            # body characters default to the initial character set
            self.bodyCharsOrig = initChars
            self.bodyChars = set(initChars)

        self.maxSpecified = max > 0

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            self.maxLen = exact
            self.minLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.asKeyword = asKeyword

        # when no length constraints are given and no spaces are involved,
        # build an equivalent regex for a faster single-call match
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.initCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except Exception:
                self.re = None

    def parseImpl( self, instring, loc, doActions=True ):
        # fast path: use the precompiled regex when one was built in __init__
        if self.re:
            result = self.re.match(instring,loc)
            if not result:
                raise ParseException(instring, loc, self.errmsg, self)

            loc = result.end()
            return loc, result.group()

        if not(instring[ loc ] in self.initChars):
            raise ParseException(instring, loc, self.errmsg, self)

        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1

        # NOTE(restoration): the following section was garbled in this copy of
        # the file; restored to match the upstream pyparsing implementation
        throwException = False
        if loc - start < self.minLen:
            throwException = True
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            throwException = True
        if self.asKeyword:
            # keyword mode: the word must not be bordered by body characters
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True

        if throwException:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except Exception:
            pass

        if self.strRepr is None:

            def charsAsStr(s):
                # abbreviate long character sets for display
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s

            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)

        return self.strRepr


class Regex(Token):
    r"""
    Token for matching strings that match a given regular expression.
    Defined with a string specifying the regular expression in a form recognized
    by the inbuilt Python re module, or with a pre-compiled re object.
    If the given regex contains named groups (defined using C{(?P<name>...)}),
    these will be preserved as named parse results.

    Example::
        realnum = Regex(r"[+-]?\d+\.\d*")
        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
    """
    compiledREtype = type(re.compile("[A-Z]"))

    def __init__( self, pattern, flags=0):
        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()

        if isinstance(pattern, basestring):
            if not pattern:
                warnings.warn("null string passed to Regex; use Empty() instead",
                        SyntaxWarning, stacklevel=2)

            self.pattern = pattern
            self.flags = flags
            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                # surface the bad pattern before letting the error propagate
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                    SyntaxWarning, stacklevel=2)
                raise
        elif isinstance(pattern, Regex.compiledREtype):
            # already compiled - use as-is
            self.re = pattern
            self.pattern = self.reString = str(pattern)
            self.flags = flags
        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if result is None:
            raise ParseException(instring, loc, self.errmsg, self)

        endloc = result.end()
        ret = ParseResults(result.group())
        # expose any named groups as named results
        for group_name, group_text in result.groupdict().items():
            ret[group_name] = group_text
        return endloc, ret

    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)
        return self.strRepr


class QuotedString(Token):
    r"""
    Token for matching strings that are delimited by quoting characters.
    
    Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=C{None})
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})

    Example::
        qs = QuotedString('"')
        print(qs.searchString('lsjdf "This is the quote" sldjf'))
        complex_qs = QuotedString('{{', endQuoteChar='}}')
        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
        sql_qs = QuotedString('"', escQuote='""')
        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
    prints::
        [['This is the quote']]
        [['This is the "quote"']]
        [['This is the quote with "embedded" quotes']]
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
        super(QuotedString,self).__init__()

        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if not quoteChar:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()

        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if not endQuoteChar:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()

        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        self.convertWhitespaceEscapes = convertWhitespaceEscapes

        # build a regex matching the quoted body: any char that is not the
        # start of the end-quote (nor a newline, unless multiline) nor escChar
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # allow partial prefixes of a multi-char end quote inside the body
            self.pattern += (
                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))

        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        # cheap first-character test before attempting the full regex match
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        ret = result.group()

        if self.unquoteResults:

            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]

            if isinstance(ret,basestring):
                # replace escaped whitespace
                if '\\' in ret and self.convertWhitespaceEscapes:
                    ws_map = {
                        r'\t' : '\t',
                        r'\n' : '\n',
                        r'\f' : '\f',
                        r'\r' : '\r',
                    }
                    for wslit,wschar in ws_map.items():
                        ret = ret.replace(wslit, wschar)

                # replace escaped characters
                # BUGFIX: use a raw string for the \g<1> backreference;
                # "\g" is an invalid escape sequence in a plain string literal
                if self.escChar:
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)

                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)

        return loc, ret

    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)

        return self.strRepr


class CharsNotIn(Token):
    """
    Token for matching words composed of characters I{not} in a given set (will
    include whitespace in matched characters if not listed in the provided exclusion set - see example).
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.

    Example::
        # define a comma-separated-value as anything that is not a ','
        csv_value = CharsNotIn(',')
        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
    prints::
        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn,self).__init__()
        # whitespace is significant for this token
        self.skipWhitespace = False
        self.notChars = notChars

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")

        self.minLen = min
        self.maxLen = max if max > 0 else _MAX_INT
        if exact > 0:
            # an exact length pins both bounds
            self.maxLen = exact
            self.minLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = ( self.minLen == 0 )
        self.mayIndexError = False

    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] in self.notChars:
            raise ParseException(instring, loc, self.errmsg, self)

        start = loc
        loc += 1
        forbidden = self.notChars
        end_limit = min( start + self.maxLen, len(instring) )
        # consume characters until a forbidden one or the length limit
        while loc < end_limit and instring[loc] not in forbidden:
            loc += 1

        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            shown = self.notChars
            if len(shown) > 4:
                self.strRepr = "!W:(%s...)" % shown[:4]
            else:
                self.strRepr = "!W:(%s)" % shown

        return self.strRepr

class White(Token):
    """
    Special matching class for matching whitespace.  Normally, whitespace is ignored
    by pyparsing grammars.  This class is included when some whitespace structures
    are significant.  Define with a string containing the whitespace characters to be
    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{L{Word}} class.
    """
    # Printable labels for each whitespace character, used to build self.name
    # and the "Expected ..." error message.  Restored from upstream pyparsing:
    # the values here had been stripped to empty strings (extraction artifact),
    # which produced a blank token name and an uninformative "Expected " errmsg.
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        # chars this token matches must NOT also be silently skipped as whitespace
        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
        #~ self.leaveWhitespace()
        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        # exact length overrides both min and max
        if exact > 0:
            self.maxLen = exact
            self.minLen = exact

    def parseImpl( self, instring, loc, doActions=True ):
        # current character must be one of the significant whitespace chars
        if not(instring[ loc ] in self.matchWhite):
            raise ParseException(instring, loc, self.errmsg, self)
        start = loc
        loc += 1
        # consume up to maxLen matching chars, bounded by end of input
        maxloc = start + self.maxLen
        maxloc = min( maxloc, len(instring) )
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1

        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]


class _PositionToken(Token):
    """Common base for tokens that match a position in the input, not text."""
    def __init__( self ):
        super(_PositionToken, self).__init__()
        self.name = self.__class__.__name__
        self.mayIndexError = False
        self.mayReturnEmpty = True

class GoToColumn(_PositionToken):
    """
    Token to advance to a specific column of input text; useful for tabular report scraping.
    """
    def __init__( self, colno ):
        super(GoToColumn, self).__init__()
        self.col = colno

    def preParse( self, instring, loc ):
        # skip whitespace (and any ignorables) up to the target column
        if col(loc, instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables(instring, loc)
            while (loc < instrlen
                   and instring[loc].isspace()
                   and col(loc, instring) != self.col):
                loc += 1
        return loc

    def parseImpl( self, instring, loc, doActions=True ):
        curcol = col(loc, instring)
        # already past the target column - cannot back up
        if curcol > self.col:
            raise ParseException( instring, loc, "Text not in expected column", self )
        newloc = loc + self.col - curcol
        return newloc, instring[loc:newloc]


class LineStart(_PositionToken):
    """
    Matches if current position is at the beginning of a line within the parse string

    Example::

        test = '''\
        AAA this line
        AAA and this line
          AAA but not this one
        B AAA and definitely not this one
        '''

        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
            print(t)

    Prints::
        ['AAA', ' this line']
        ['AAA', ' and this line']

    """
    def __init__( self ):
        super(LineStart, self).__init__()
        self.errmsg = "Expected start of line"

    def parseImpl( self, instring, loc, doActions=True ):
        # succeed (with no tokens) only at column 1
        if col(loc, instring) != 1:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

class LineEnd(_PositionToken):
    """
    Matches if current position is at the end of a line within the parse string
    """
    def __init__( self ):
        super(LineEnd,self).__init__()
        # keep "\n" significant: do not skip it as ordinary whitespace
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"

    def parseImpl( self, instring, loc, doActions=True ):
        # NOTE(review): the original line here was syntactically invalid
        # ("if loc len(instring):"); body restored from upstream pyparsing.
        if loc < len(instring):
            # mid-string: only a newline counts as end-of-line (and is consumed)
            if instring[loc] == "\n":
                return loc+1, "\n"
            else:
                raise ParseException(instring, loc, self.errmsg, self)
        elif loc == len(instring):
            # end of input also terminates a line
            return loc+1, []
        else:
            raise ParseException(instring, loc, self.errmsg, self)

class WordStart(_PositionToken):
    """
    Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
    the string being parsed, or at the beginning of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordStart, self).__init__()
        self.wordChars = set(wordChars)
        self.errmsg = "Not at the start of a word"

    def parseImpl(self, instring, loc, doActions=True ):
        # position 0 is always a word start
        if loc == 0:
            return loc, []
        prev_is_word = instring[loc-1] in self.wordChars
        here_is_word = instring[loc] in self.wordChars
        # must sit on a word char that is not preceded by a word char
        if prev_is_word or not here_is_word:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

class WordEnd(_PositionToken):
    """
    Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
    the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars = printables):
        """Record the word-character set and keep trailing whitespace significant."""
        super(WordEnd, self).__init__()
        self.wordChars = set(wordChars)
        self.errmsg = "Not at the end of a word"
        self.skipWhitespace = False

    def parseImpl(self, instring, loc, doActions=True ):
        instrlen = len(instring)
        if instrlen>0 and loc maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                # save match among all matches, to retry longest to shortest
                matches.append((loc2, e))

        if matches:
            matches.sort(key=lambda x: -x[0])
            for _,e in matches:
                try:
                    return e._parse( instring, loc, doActions )
                except ParseException as err:
                    err.__traceback__ = None
                    if err.loc > maxExcLoc:
                        maxException = err
                        maxExcLoc = err.loc

        if maxException is not None:
            maxException.msg = self.errmsg
            raise maxException
        else:
            raise ParseException(instring, loc, "no defined alternatives to match", self)


    def __ixor__(self, other ):
        """Implement C{expr ^= other} by appending another alternative."""
        if isinstance(other, basestring):
            other = ParserElement._literalStringClass(other)
        return self.append(other)

    def __str__( self ):
        """Render as "{a ^ b ^ ...}", caching the computed form."""
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            inner = " ^ ".join(_ustr(e) for e in self.exprs)
            self.strRepr = "{" + inner + "}"
        return self.strRepr

    def checkRecursion( self, parseElementList ):
        """Recursively check each alternative for left-recursion."""
        seen = parseElementList[:] + [self]
        for alt in self.exprs:
            alt.checkRecursion(seen)


class MatchFirst(ParseExpression):
    """
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the C{'|'} operator.

    Example::
        # construct MatchFirst using '|' operator

        # watch the order of expressions to match
        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]

        # put more selective expression first
        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst, self).__init__(exprs, savelist)
        # an empty alternative list trivially "matches empty"
        self.mayReturnEmpty = (any(e.mayReturnEmpty for e in self.exprs)
                               if self.exprs else True)

    def parseImpl( self, instring, loc, doActions=True ):
        # try each alternative in order; the first success wins
        deepestErr = None
        deepestLoc = -1
        for e in self.exprs:
            try:
                return e._parse( instring, loc, doActions )
            except ParseException as err:
                # remember whichever failure got the furthest into the input
                if err.loc > deepestLoc:
                    deepestErr = err
                    deepestLoc = err.loc
            except IndexError:
                if len(instring) > deepestLoc:
                    deepestErr = ParseException(instring, len(instring), e.errmsg, self)
                    deepestLoc = len(instring)

        # no alternative matched - report the deepest failure
        if deepestErr is not None:
            deepestErr.msg = self.errmsg
            raise deepestErr
        raise ParseException(instring, loc, "no defined alternatives to match", self)

    def __ior__(self, other ):
        """Implement C{expr |= other} by appending another alternative."""
        if isinstance(other, basestring):
            other = ParserElement._literalStringClass(other)
        return self.append(other)

    def __str__( self ):
        """Render as "{a | b | ...}", caching the computed form."""
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr

    def checkRecursion( self, parseElementList ):
        """Recursively check each alternative for left-recursion."""
        seen = parseElementList[:] + [self]
        for alt in self.exprs:
            alt.checkRecursion(seen)


class Each(ParseExpression):
    """
    Requires all given C{ParseExpression}s to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'&'} operator.

    Example::
        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
        integer = Word(nums)
        shape_attr = "shape:" + shape_type("shape")
        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
        color_attr = "color:" + color("color")
        size_attr = "size:" + integer("size")

        # use Each (using operator '&') to accept attributes in any order 
        # (shape and posn are required, color and size are optional)
        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)

        shape_spec.runTests('''
            shape: SQUARE color: BLACK posn: 100, 120
            shape: CIRCLE size: 50 color: BLUE posn: 50,80
            color:GREEN size:20 shape:TRIANGLE posn:20,40
            '''
            )
    prints::
        shape: SQUARE color: BLACK posn: 100, 120
        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
        - color: BLACK
        - posn: ['100', ',', '120']
          - x: 100
          - y: 120
        - shape: SQUARE


        shape: CIRCLE size: 50 color: BLUE posn: 50,80
        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
        - color: BLUE
        - posn: ['50', ',', '80']
          - x: 50
          - y: 80
        - shape: CIRCLE
        - size: 50


        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
        - color: GREEN
        - posn: ['20', ',', '40']
          - x: 20
          - y: 40
        - shape: TRIANGLE
        - size: 20
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        # Each can match empty input only if every sub-expression can
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        self.skipWhitespace = True
        # grouping of sub-expressions is deferred to the first parse attempt
        self.initExprGroups = True

    def parseImpl( self, instring, loc, doActions=True ):
        if self.initExprGroups:
            # One-time partition of sub-expressions by repetition class.
            # opt1map lets us report the Optional wrapper (not its inner expr)
            # in matchOrder below.
            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
            self.optionals = opt1 + opt2
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt  = self.optionals[:]
        matchOrder = []

        # Repeatedly sweep all still-eligible expressions, consuming input via
        # tryParse, until a full pass matches nothing new.
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    # record the order in which expressions matched
                    matchOrder.append(self.opt1map.get(id(e),e))
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False

        # any required expression left unmatched is an error
        if tmpReqd:
            missing = ", ".join(_ustr(e) for e in tmpReqd)
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )

        # add any unmatched Optionals, in case they have default values defined
        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]

        # re-parse in matched order, this time honoring doActions, and
        # accumulate the results
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)

        finalResults = sum(resultlist, ParseResults([]))
        return loc, finalResults

    def __str__( self ):
        # render as "{a & b & ...}", caching the computed form
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr

    def checkRecursion( self, parseElementList ):
        # recursively check each sub-expression for left-recursion
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )


class ParseElementEnhance(ParserElement):
    """
    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
    """
    def __init__( self, expr, savelist=False ):
        super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
            # promote bare strings to the configured literal class
            if issubclass(ParserElement._literalStringClass, Token):
                expr = ParserElement._literalStringClass(expr)
            else:
                expr = ParserElement._literalStringClass(Literal(expr))
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # inherit parsing characteristics from the wrapped expression
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)

    def parseImpl( self, instring, loc, doActions=True ):
        # delegate to the wrapped expression (subclasses usually override)
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)

    def leaveWhitespace( self ):
        self.skipWhitespace = False
        # bugfix: copy/propagate only when an expression is attached; the
        # original copied self.expr unconditionally, raising AttributeError
        # when self.expr is None (e.g. a not-yet-defined Forward)
        if self.expr is not None:
            self.expr = self.expr.copy()
            self.expr.leaveWhitespace()
        return self

    def ignore( self, other ):
        # Suppress-wrapped ignorables are deduplicated; everything else is
        # passed straight through; either way the wrapped expr ignores it too
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self

    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self

    def checkRecursion( self, parseElementList ):
        # seeing ourselves again means the grammar is left-recursive
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )

    def validate( self, validateTrace=[] ):
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )

    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except Exception:
            pass

        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr


class FollowedBy(ParseElementEnhance):
    """
    Lookahead matching of the given parse expression.  C{FollowedBy}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression matches at the current
    position.  C{FollowedBy} always returns a null token list.

    Example::
        # use FollowedBy to match a label only if it is followed by a ':'
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        
        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
    prints::
        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
    """
    def __init__( self, expr ):
        super(FollowedBy, self).__init__(expr)
        # a pure lookahead consumes no input, so it may "match empty"
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        # succeed with no tokens iff expr matches here; never advance loc
        self.expr.tryParse(instring, loc)
        return loc, []


class NotAny(ParseElementEnhance):
    """
    Lookahead to disallow matching with the given parse expression.  C{NotAny}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression does I{not} match at the current
    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
    always returns a null token list.  May be constructed using the '~' operator.

    Example::
        
    """
    def __init__( self, expr ):
        super(NotAny, self).__init__(expr)
        # do NOT use self.leaveWhitespace() here - that would propagate to expr
        self.skipWhitespace = False
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)

    def parseImpl( self, instring, loc, doActions=True ):
        # fail exactly when the forbidden expression would match here
        if self.expr.canParseNext(instring, loc):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

    def __str__( self ):
        """Render as "~{expr}", caching the computed form."""
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"
        return self.strRepr

class _MultipleMatch(ParseElementEnhance):
    # Shared implementation for OneOrMore/ZeroOrMore: repeatedly match
    # self.expr, optionally stopping before a stopOn sentinel expression.
    def __init__( self, expr, stopOn=None):
        super(_MultipleMatch, self).__init__(expr)
        self.saveAsList = True
        ender = stopOn
        if isinstance(ender, basestring):
            # promote a string sentinel to the configured literal class
            ender = ParserElement._literalStringClass(ender)
        # negative lookahead that fails when the sentinel is next in the input
        self.not_ender = ~ender if ender is not None else None

    def parseImpl( self, instring, loc, doActions=True ):
        # hoist bound methods out of the loop (performance)
        self_expr_parse = self.expr._parse
        self_skip_ignorables = self._skipIgnorables
        check_ender = self.not_ender is not None
        if check_ender:
            try_not_ender = self.not_ender.tryParse
        
        # must be at least one (but first see if we are the stopOn sentinel;
        # if so, fail)
        if check_ender:
            try_not_ender(instring, loc)
        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
        try:
            hasIgnoreExprs = (not not self.ignoreExprs)
            while 1:
                if check_ender:
                    try_not_ender(instring, loc)
                if hasIgnoreExprs:
                    preloc = self_skip_ignorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
                # accumulate only non-empty results (or ones carrying named keys)
                if tmptokens or tmptokens.haskeys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # the first failed repetition ends the loop; keep what matched so far
            pass

        return loc, tokens
        
class OneOrMore(_MultipleMatch):
    """
    Repetition of one or more of the given expression.
    
    Parameters:
     - expr - expression that must match one or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition 
          expression)          

    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: BLACK"
        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]

        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
        
        # could also be written as
        (attr_expr * (1,)).parseString(text).pprint()
    """

    def __str__( self ):
        """Render as "{expr}...", caching the computed form."""
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + _ustr(self.expr) + "}..."
        return self.strRepr

class ZeroOrMore(_MultipleMatch):
    """
    Optional repetition of zero or more of the given expression.
    
    Parameters:
     - expr - expression that must match zero or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition 
          expression)          

    Example: similar to L{OneOrMore}
    """
    def __init__( self, expr, stopOn=None):
        super(ZeroOrMore, self).__init__(expr, stopOn=stopOn)
        # zero repetitions is a legal (empty) match
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        try:
            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
        except (ParseException, IndexError):
            # no repetitions matched - still a success, with no tokens
            return loc, []

    def __str__( self ):
        """Render as "[expr]...", caching the computed form."""
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]..."
        return self.strRepr

class _NullToken(object):
    def __bool__(self):
        return False
    __nonzero__ = __bool__
    def __str__(self):
        return ""

_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """
    Optional matching of the given expression.

    Parameters:
     - expr - expression that must match zero or more times
     - default (optional) - value to be returned if the optional expression is not found.

    Example::
        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
        zip.runTests('''
            # traditional ZIP code
            12345
            
            # ZIP+4 form
            12101-0001
            
            # invalid ZIP
            98765-
            ''')
    prints::
        # traditional ZIP code
        12345
        ['12345']

        # ZIP+4 form
        12101-0001
        ['12101-0001']

        # invalid ZIP
        98765-
             ^
        FAIL: Expected end of text (at char 5), (line:1, col:6)
    """
    def __init__( self, expr, default=_optionalNotMatched ):
        super(Optional, self).__init__(expr, savelist=False)
        self.saveAsList = self.expr.saveAsList
        self.defaultValue = default
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        try:
            # if the wrapped expression matches, behave exactly like it
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException, IndexError):
            # no match: empty result, or the supplied default if one was given
            if self.defaultValue is _optionalNotMatched:
                return loc, []
            if self.expr.resultsName:
                tokens = ParseResults([self.defaultValue])
                tokens[self.expr.resultsName] = self.defaultValue
            else:
                tokens = [self.defaultValue]
            return loc, tokens

    def __str__( self ):
        """Render as "[expr]", caching the computed form."""
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"
        return self.strRepr

class SkipTo(ParseElementEnhance):
    """
    Token for skipping over all undefined text until the matched expression is found.

    Parameters:
     - expr - target expression marking the end of the data to be skipped
     - include - (default=C{False}) if True, the target expression is also parsed 
          (the skipped text and target expression are returned as a 2-element list).
     - ignore - (default=C{None}) used to define grammars (typically quoted strings and 
          comments) that might contain false matches to the target expression
     - failOn - (default=C{None}) define expressions that are not allowed to be 
          included in the skipped test; if found before the target expression is found, 
          the SkipTo is not a match

    Example::
        report = '''
            Outstanding Issues Report - 1 Jan 2000

               # | Severity | Description                               |  Days Open
            -----+----------+-------------------------------------------+-----------
             101 | Critical | Intermittent system crash                 |          6
              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
              79 | Minor    | System slow when running too many reports |         47
            '''
        integer = Word(nums)
        SEP = Suppress('|')
        # use SkipTo to simply match everything up until the next SEP
        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
        # - parse action will call token.strip() for each matched token, i.e., the description body
        string_data = SkipTo(SEP, ignore=quotedString)
        string_data.setParseAction(tokenMap(str.strip))
        ticket_expr = (integer("issue_num") + SEP 
                      + string_data("sev") + SEP 
                      + string_data("desc") + SEP 
                      + integer("days_open"))
        
        for tkt in ticket_expr.searchString(report):
            print tkt.dump()
    prints::
        ['101', 'Critical', 'Intermittent system crash', '6']
        - days_open: 6
        - desc: Intermittent system crash
        - issue_num: 101
        - sev: Critical
        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
        - days_open: 14
        - desc: Spelling error on Login ('log|n')
        - issue_num: 94
        - sev: Cosmetic
        ['79', 'Minor', 'System slow when running too many reports', '47']
        - days_open: 47
        - desc: System slow when running too many reports
        - issue_num: 79
        - sev: Minor
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        if isinstance(failOn, basestring):
            # promote a string failOn to the configured literal class
            self.failOn = ParserElement._literalStringClass(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)

    def parseImpl( self, instring, loc, doActions=True ):
        startloc = loc
        instrlen = len(instring)
        expr = self.expr
        expr_parse = self.expr._parse
        # hoist optional bound methods out of the scan loop (performance)
        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
        
        # scan forward one position at a time until the target expression
        # matches; the loop's else-clause fires only if we run off the end
        tmploc = loc
        while tmploc <= instrlen:
            if self_failOn_canParseNext is not None:
                # break if failOn expression matches
                if self_failOn_canParseNext(instring, tmploc):
                    break
                    
            if self_ignoreExpr_tryParse is not None:
                # advance past ignore expressions
                while 1:
                    try:
                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
                    except ParseBaseException:
                        break
            
            try:
                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
            except (ParseException, IndexError):
                # no match, advance loc in string
                tmploc += 1
            else:
                # matched skipto expr, done
                break

        else:
            # ran off the end of the input string without matching skipto expr, fail
            raise ParseException(instring, loc, self.errmsg, self)

        # build up return values
        loc = tmploc
        skiptext = instring[startloc:loc]
        skipresult = ParseResults(skiptext)
        
        if self.includeMatch:
            # re-parse the target (with actions) and append its tokens
            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
            skipresult += mat

        return loc, skipresult

class Forward(ParseElementEnhance):
    """
    Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.

    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
        fwdExpr << a | b | c
    will actually be evaluated as::
        (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    explicitly group the values inserted into the C{Forward}::
        fwdExpr << (a | b | c)
    Converting to use the '<<=' operator instead will avoid this problem.

    See L{ParseResults.pprint} for an example of a recursive parser created using
    C{Forward}.
    """
    def __init__( self, other=None ):
        # expr may be None until the grammar author assigns it via << or <<=
        super(Forward,self).__init__( other, savelist=False )

    def __lshift__( self, other ):
        # attach the deferred expression and adopt its parsing characteristics
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass(other)
        self.expr = other
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        return self
        
    def __ilshift__(self, other):
        # "fwd <<= expr" delegates to <<, avoiding the | precedence pitfall
        return self << other
    
    def leaveWhitespace( self ):
        # note: unlike the base class, does not copy or propagate to self.expr
        self.skipWhitespace = False
        return self

    def streamline( self ):
        # guard against infinite recursion through self-referential grammars
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self

    def validate( self, validateTrace=[] ):
        # only descend into expressions not already on the validation path
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        return self.__class__.__name__ + ": ..."

        # stubbed out for now - creates awful memory and perf issues
        # (everything below the return above is intentionally unreachable;
        # it shows the fuller rendering that was disabled)
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString

    def copy(self):
        # a defined Forward copies normally; an undefined one returns a new
        # Forward that points back at this instance
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            ret = Forward()
            ret <<= self
            return ret

class _ForwardNoRecurse(Forward):
    # Stand-in class swapped onto a Forward instance while rendering its
    # string form, to cut off infinite recursion in self-referential grammars.
    def __str__(self):
        return "..."

class TokenConverter(ParseElementEnhance):
    """
    Abstract subclass of C{ParseExpression}, for converting parsed results.
    """
    def __init__(self, expr, savelist=False):
        # savelist is accepted for signature compatibility but intentionally
        # not forwarded; converters manage list-saving themselves
        super(TokenConverter, self).__init__(expr)
        self.saveAsList = False

class Combine(TokenConverter):
    """
    Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the input string;
    this can be disabled by specifying C{'adjacent=False'} in the constructor.

    Example::
        real = Word(nums) + '.' + Word(nums)
        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
        # will also erroneously match the following
        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']

        real = Combine(Word(nums) + '.' + Word(nums))
        print(real.parseString('3.1416')) # -> ['3.1416']
        # no match when there are internal spaces
        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        # string inserted between the concatenated tokens (default: none)
        self.joinString = joinString
        self.callPreparse = True

    def ignore( self, other ):
        # in adjacent mode, ignorables must not be skipped between the
        # combined tokens - register them on this element only, bypassing
        # ParseElementEnhance's propagation to self.expr
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super( Combine, self).ignore( other )
        return self

    def postParse( self, instring, loc, tokenlist ):
        # join all matched tokens into a single string token; copy first so
        # any named results attached to the original list are preserved
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)

        if self.resultsName and retToks.haskeys():
            return [ retToks ]
        else:
            return retToks

class Group(TokenConverter):
    """
    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.

    Example::
        ident = Word(alphas)
        num = Word(nums)
        term = ident | num
        func = ident + Optional(delimitedList(term))
        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']

        func = ident + Group(Optional(delimitedList(term)))
        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
    """
    def __init__(self, expr):
        super(Group, self).__init__(expr)
        # mark results to be kept as a nested sub-list
        self.saveAsList = True

    def postParse(self, instring, loc, tokenlist):
        # wrap the entire token list so it appears as a single nested element
        return [tokenlist]

class Dict(TokenConverter):
    """
    Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as a item key.

    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        
        # print attributes as plain groups
        print(OneOrMore(attr_expr).parseString(text).dump())
        
        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
        print(result.dump())
        
        # access named fields as dict entries, or output as dict
        print(result['shape'])        
        print(result.asDict())
    prints::
        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']

        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
    See more examples at L{ParseResults} of accessing fields by results name.
    """
    def __init__( self, expr ):
        super(Dict,self).__init__( expr )
        self.saveAsList = True

    def postParse( self, instring, loc, tokenlist ):
        # For each sub-list of matched tokens, use its first token as a dict
        # key and the remaining token(s) as the associated value.
        for i,tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            ikey = tok[0]
            if isinstance(ikey,int):
                # integer keys are stored by their string form
                ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                # key with no value -> empty-string value
                tokenlist[ikey] = _ParseResultsWithOffset("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                # simple scalar key/value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
            else:
                # multi-token value: everything after the key
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
                    # keep structure when multiple tokens or named results exist
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
                else:
                    # single unnamed token - store it directly
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)

        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist


class Suppress(TokenConverter):
    """
    Converter for ignoring the results of a parsed expression.

    Example::
        source = "a, b, c,d"
        wd = Word(alphas)
        wd_list1 = wd + ZeroOrMore(',' + wd)
        print(wd_list1.parseString(source))

        # often, delimiters that are useful during parsing are just in the
        # way afterward - use Suppress to keep them out of the parsed output
        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
        print(wd_list2.parseString(source))
    prints::
        ['a', ',', 'b', ',', 'c', ',', 'd']
        ['a', 'b', 'c', 'd']
    (See also L{delimitedList}.)
    """
    def postParse(self, instring, loc, tokenlist):
        # discard every matched token; the expression still consumes input
        return []

    def suppress(self):
        # already suppressing - re-suppressing is a no-op
        return self


class OnlyOnce(object):
    """
    Wrapper for parse actions, to ensure they are only called once.
    """
    def __init__(self, methodCall):
        self.callable = _trim_arity(methodCall)
        self.called = False

    def __call__(self, s, l, t):
        # guard clause: any call after the first successful one fails the parse
        if self.called:
            raise ParseException(s, l, "")
        results = self.callable(s, l, t)
        # only mark as called once the wrapped action returns successfully
        self.called = True
        return results

    def reset(self):
        """Re-arm the wrapper so the parse action may fire again."""
        self.called = False

def traceParseAction(f):
    """
    Decorator for debugging parse actions.

    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.

    Example::
        wd = Word(alphas)

        @traceParseAction
        def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))

        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
    prints::
        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
        <<leaving remove_duplicate_chars (ret: 'dfjkls')
        ['dfjkls']
    """
    f = _trim_arity(f)
    def z(*paArgs):
        # parse actions are called with (s,l,t) or (self,s,l,t); the last
        # three positional args are always the string, location, and tokens
        thisFunc = f.__name__
        s,l,t = paArgs[-3:]
        if len(paArgs) > 3:
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l,s), l, t) )
        try:
            ret = f(*paArgs)
        except Exception as exc:
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc, exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc, ret) )
        return ret
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z

#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
    """
    Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace, and
    comments, but this can be overridden by passing C{combine=True} in the constructor.
    If C{combine} is set to C{True}, the matching tokens are returned as a single token
    string, with the delimiters included; otherwise, the matching tokens are returned
    as a list of tokens, with the delimiters suppressed.

    Example::
        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
    """
    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
    if combine:
        # keep delimiters, concatenated into one token
        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
    else:
        # suppress delimiters, return just the element tokens
        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)

def countedArray( expr, intExpr=None ):
    """
    Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.

    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.

    Example::
        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']

        # in this parser, the leading integer value is given in binary,
        # '10' indicating that 2 values are in the array
        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
    """
    arrayExpr = Forward()

    def setArrayLength(s, l, t):
        # rebind the Forward to exactly t[0] repetitions of expr (or an empty
        # match for a zero count); return [] to suppress the count token
        count = t[0]
        arrayExpr << (count and Group(And([expr] * count)) or Group(empty))
        return []

    if intExpr is None:
        intExpr = Word(nums).setParseAction(lambda t: int(t[0]))
    else:
        intExpr = intExpr.copy()
    intExpr.setName("arrayLen")
    intExpr.addParseAction(setArrayLength, callDuringTry=True)
    return (intExpr + arrayExpr).setName('(len) ' + _ustr(expr) + '...')

def _flatten(L):
    ret = []
    for i in L:
        if isinstance(i,list):
            ret.extend(_flatten(i))
        else:
            ret.append(i)
    return ret

def matchPreviousLiteral(expr):
    """
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
    If this is not desired, use C{matchPreviousExpr}.
    Do I{not} use with packrat parsing enabled.
    """
    rep = Forward()

    def copyTokenToRepeater(s, l, t):
        # each time expr matches, rebind rep to literally match those tokens
        if not t:
            rep << Empty()
        elif len(t) == 1:
            rep << t[0]
        else:
            # multiple/nested tokens: flatten and match them in sequence
            tflat = _flatten(t.asList())
            rep << And(Literal(tt) for tt in tflat)

    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep

def matchPreviousExpr(expr):
    """
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
    expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do I{not} use with packrat parsing enabled.
    """
    rep = Forward()
    # rep parses with a copy of expr; a parse action (installed below, each
    # time expr matches) then compares rep's tokens against expr's tokens
    e2 = expr.copy()
    rep <<= e2
    def copyTokenToRepeater(s,l,t):
        # capture the tokens from the earlier match...
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            # ...and require the later match to produce the identical tokens
            theseTokens = _flatten(t.asList())
            if  theseTokens != matchTokens:
                # empty message: failure context is reported by the caller
                raise ParseException("",0,"")
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep

def _escapeRegexRangeChars(s):
    """Backslash-escape the characters that are special inside a regex
    character class (backslash, '^', '-', ']'), and encode literal
    newline/tab as their escape sequences."""
    for special in r"\^-]":
        s = s.replace(special, _bslash + special)
    s = s.replace("\n", r"\n").replace("\t", r"\t")
    return _ustr(s)

def oneOf( strs, caseless=False, useRegex=True ):
    """
    Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a C{L{MatchFirst}} for best performance.

    Parameters:
     - strs - a string of space-delimited literals, or a collection of string literals
     - caseless - (default=C{False}) - treat all literals as caseless
     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
          if creating a C{Regex} raises an exception)

    Example::
        comp_oper = oneOf("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
    prints::
        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    """
    # select equality test, prefix ("masking") test, and literal class
    # according to the caseless flag
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal

    symbols = []
    if isinstance(strs,basestring):
        symbols = strs.split()
    elif isinstance(strs, collections.Iterable):
        # NOTE(review): collections.Iterable was removed in Python 3.10;
        # modern code uses collections.abc.Iterable (kept as-is for the
        # py2/py3 compatibility this module targets)
        symbols = list(strs)
    else:
        warnings.warn("Invalid argument to oneOf, expected string or iterable",
                SyntaxWarning, stacklevel=2)
    if not symbols:
        return NoMatch()

    # remove duplicates, and move any symbol that is a prefix of a later
    # symbol behind it, so longer alternatives are always tried first
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1

    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            # if every symbol is a single character, collapse to a character
            # class; otherwise build an alternation of escaped literals
            if len(symbols)==len("".join(symbols)):
                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
            else:
                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
        except Exception:
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)


    # last resort, just use MatchFirst
    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))

def dictOf( key, value ):
    """
    Helper to easily and clearly define a dictionary by specifying the respective patterns
    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
    in the proper order.  The key pattern can include delimiting markers or punctuation,
    as long as they are suppressed, thereby leaving the significant key text.  The value
    pattern can include named results, so that the C{Dict} results can include named token
    fields.

    Example::
        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        print(OneOrMore(attr_expr).parseString(text).dump())
        
        attr_label = label
        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)

        # similar to Dict, but simpler call format
        result = dictOf(attr_label, attr_value).parseString(text)
        print(result.dump())
        print(result['shape'])
        print(result.shape)  # object attribute access works too
        print(result.asDict())
    prints::
        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        SQUARE
        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
    """
    # each key/value pair becomes one grouped entry; Dict auto-assigns names
    entry = Group(key + value)
    return Dict(ZeroOrMore(entry))

def originalTextFor(expr, asString=True):
    """
    Helper to return the original, untokenized text for a given expression.  Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns a string containing the original parsed text.

    If the optional C{asString} argument is passed as C{False}, then the return value is a
    C{L{ParseResults}} containing any results names that were originally matched, and a
    single token containing the original matched text from the input string.  So if
    the expression passed to C{L{originalTextFor}} contains expressions with defined
    results names, you must set C{asString} to C{False} if you want to preserve those
    results name values.

    Example::
        src = "this is test <b> bold <i>text</i> </b> normal text "
        for tag in ("b","i"):
            opener,closer = makeHTMLTags(tag)
            patt = originalTextFor(opener + SkipTo(closer) + closer)
            print(patt.searchString(src)[0])
    prints::
        ['<b> bold <i>text</i> </b>']
        ['<i>text</i>']
    """
    # bracket the expression with zero-width markers that record locations
    startMarker = Empty().setParseAction(lambda s, loc, t: loc)
    endMarker = startMarker.copy()
    endMarker.callPreparse = False
    matchExpr = startMarker("_original_start") + expr + endMarker("_original_end")
    if asString:
        def extractText(s, l, t):
            # replace the tokens with the raw matched slice
            return s[t._original_start: t._original_end]
    else:
        def extractText(s, l, t):
            # keep the ParseResults (and its names), popping the marker keys
            t[:] = [s[t.pop('_original_start'): t.pop('_original_end')]]
    matchExpr.setParseAction(extractText)
    matchExpr.ignoreExprs = expr.ignoreExprs
    return matchExpr

def ungroup(expr):
    """
    Helper to undo pyparsing's default grouping of And expressions, even
    if all but one are non-empty.
    """
    def firstToken(t):
        # unwrap the single grouped result
        return t[0]
    return TokenConverter(expr).setParseAction(firstToken)

def locatedExpr(expr):
    """
    Helper to decorate a returned token with its starting and ending locations in the input string.
    This helper adds the following results names:
     - locn_start = location where matched expression begins
     - locn_end = location where matched expression ends
     - value = the actual parsed results

    Be careful if the input text contains tab characters, you may want to call
    C{L{ParserElement.parseWithTabs}}

    Example::
        wd = Word(alphas)
        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
            print(match)
    prints::
        [[0, 'ljsdf', 5]]
        [[8, 'lksdjjf', 15]]
        [[18, 'lkkjj', 23]]
    """
    # zero-width marker that records the current parse location
    locator = Empty().setParseAction(lambda s, l, t: l)
    # the trailing marker must not skip whitespace, so it reports the
    # position immediately after the matched value
    endLocator = locator.copy().leaveWhitespace()
    return Group(locator("locn_start") + expr("value") + endLocator("locn_end"))


# convenience constants for positional expressions
empty       = Empty().setName("empty")
lineStart   = LineStart().setName("lineStart")
lineEnd     = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd   = StringEnd().setName("stringEnd")

# internal expressions used by srange() to parse regex-style [] character sets:
# escaped punctuation (e.g. r"\]"), escaped hex char codes (r"\x41" / r"\0x41"),
# and escaped octal char codes (r"\041"), each reduced to the character itself
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
# a single set member: any escaped form, a printable other than '\' or ']', or a word char
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)
# an 'a-z' style dash-separated range
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
# a full bracket expression, e.g. "[^a-z0-9_]" (optional leading '^' negation)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"

def srange(s):
    r"""
    Helper to easily define string ranges for use in Word construction.  Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]")   -> "0123456789"
        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be:
     - a single character
     - an escaped character with a leading backslash (such as C{\-} or C{\]})
     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 
         (C{\0x##} is also supported for backwards compatibility) 
     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
    """
    def _expand(part):
        # single characters pass through unchanged; a ParseResults element is
        # a dash range, expanded to every character in the inclusive span
        if not isinstance(part, ParseResults):
            return part
        return ''.join(unichr(c) for c in range(ord(part[0]), ord(part[1]) + 1))
    try:
        return "".join(_expand(part) for part in _reBracketExpr.parseString(s).body)
    except Exception:
        # malformed range spec: return empty string rather than raising
        return ""

def matchOnlyAtCol(n):
    """
    Helper method for defining parse actions that require matching at a specific
    column in the input text.
    """
    def checkColumn(strg, locn, toks):
        # col() is 1-based; fail the parse when the match starts elsewhere
        if col(locn, strg) != n:
            raise ParseException(strg, locn, "matched token not at column %d" % n)
    return checkColumn

def replaceWith(replStr):
    """
    Helper method for common parse actions that simply return a literal value.  Especially
    useful when used with C{L{transformString}()}.

    Example::
        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
        term = na | num
        
        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
    """
    def _replacement(s, l, t):
        # ignore the matched tokens; always yield the fixed value
        return [replStr]
    return _replacement

def removeQuotes(s,l,t):
    """
    Helper parse action for removing quotation marks from parsed quoted strings.

    Example::
        # by default, quotation marks are included in parsed results
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]

        # use removeQuotes to strip quotation marks from parsed results
        quotedString.setParseAction(removeQuotes)
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
    """
    # drop the first and last characters (the surrounding quote marks)
    quoted = t[0]
    return quoted[1:-1]

def tokenMap(func, *args):
    """
    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
    args are passed, they are forwarded to the given function as additional arguments after
    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
    parsed data to an integer using base 16.

    Example (compare the last to example in L{ParserElement.transformString}::
        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
        hex_ints.runTests('''
            00 11 22 aa FF 0a 0d 1a
            ''')
        
        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
        OneOrMore(upperword).runTests('''
            my kingdom for a horse
            ''')

        wd = Word(alphas).setParseAction(tokenMap(str.title))
        OneOrMore(wd).setParseAction(' '.join).runTests('''
            now is the winter of our discontent made glorious summer by this sun of york
            ''')
    prints::
        00 11 22 aa FF 0a 0d 1a
        [0, 17, 34, 170, 255, 10, 13, 26]

        my kingdom for a horse
        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']

        now is the winter of our discontent made glorious summer by this sun of york
        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
    """
    def pa(s, l, t):
        # apply func (with any extra args) to every matched token
        return [func(tok, *args) for tok in t]

    # give the parse action a meaningful __name__ for debugging/tracing;
    # fall back to the class name for callables without __name__
    try:
        mapped_name = getattr(func, '__name__',
                              getattr(func, '__class__').__name__)
    except Exception:
        mapped_name = str(func)
    pa.__name__ = mapped_name

    return pa

# (Deprecated) parse action that upper-cases each matched token
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""

# (Deprecated) parse action that lower-cases each matched token
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
    
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name.

    Returns an (openTag, closeTag) pair of expressions. If C{xml} is true,
    tags are matched case-sensitively and attribute values must be
    double-quoted; otherwise matching is caseless, attribute names are
    lower-cased, and unquoted attribute values are allowed.
    """
    if isinstance(tagStr,basestring):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        # a ParserElement was passed in; use its name for results naming
        resname = tagStr.name

    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    # NOTE: the "</" and ">" literals below were corrupted (HTML-stripped) in
    # the extracted source; restored to the canonical pyparsing definition
    closeTag = Combine(_L("</") + tagStr + ">")

    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag

def makeHTMLTags(tagStr):
    """
    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.

    Example::
        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
        a, a_end = makeHTMLTags("A")
        link_expr = a + SkipTo(a_end)("link_text") + a_end

        for link in link_expr.searchString(html_text):
            # attributes in the opening tag (like "href") are also accessible as named results
            print(link.link_text, '->', link.href)
    """
    # HTML tag matching is caseless (xml=False)
    return _makeTags(tagStr, False)

def makeXMLTags(tagStr):
    """
    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
    tags only in the given upper/lower case.

    Example: similar to L{makeHTMLTags}
    """
    # XML tag matching is case-sensitive (xml=True)
    return _makeTags(tagStr, True)

def withAttribute(*args,**attrDict):
    """
    Helper to create a validating parse action to be used with start tags created
    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
    with a required attribute value, to avoid false matches on common tags such as
    C{} or C{
}. Call C{withAttribute} with a series of attribute names and values. Specify the list of filter attributes names and values as: - keyword arguments, as in C{(align="right")}, or - as an explicit dict with C{**} operator, when an attribute name is also a Python reserved word, as in C{**{"class":"Customer", "align":"right"}} - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) For attribute names with a namespace prefix, you must use the second form. Attribute names are matched insensitive to upper/lower case. If just testing for C{class} (with or without a namespace), use C{L{withClass}}. To verify that the attribute exists, but without specifying a value, pass C{withAttribute.ANY_VALUE} as the value. Example:: html = '''
Some text
1 4 0 1 0
1,3 2,3 1,1
this has no type
''' div,div_end = makeHTMLTags("div") # only match div tag having a type attribute with value "grid" div_grid = div().setParseAction(withAttribute(type="grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) # construct a match with any div tag having a type attribute, regardless of the value div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 """ if args: attrs = args[:] else: attrs = attrDict.items() attrs = [(k,v) for k,v in attrs] def pa(s,l,tokens): for attrName,attrValue in attrs: if attrName not in tokens: raise ParseException(s,l,"no matching attribute " + attrName) if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % (attrName, tokens[attrName], attrValue)) return pa withAttribute.ANY_VALUE = object() def withClass(classname, namespace=''): """ Simplified version of C{L{withAttribute}} when matching on a div class - made difficult because C{class} is a reserved word in Python. Example:: html = '''
Some text
1 4 0 1 0
1,3 2,3 1,1
this <div> has no class
''' div,div_end = makeHTMLTags("div") div_grid = div().setParseAction(withClass("grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 """ classattr = "%s:class" % namespace if namespace else "class" return withAttribute(**{classattr : classname}) opAssoc = _Constants() opAssoc.LEFT = object() opAssoc.RIGHT = object() def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): """ Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. Parse actions can also be attached to operator expressions. The generated parser will also recognize the use of parentheses to override operator precedences (see example below). Note: if you define a deep operator list, you may see performance issues when using infixNotation. See L{ParserElement.enablePackrat} for a mechanism to potentially improve your parser performance. Parameters: - baseExpr - expression representing the most basic element for the nested - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal; if numTerms is 3, opExpr is a tuple of two expressions, for the two operators separating the 3 terms - numTerms is the number of terms for this operator (must be 1, 2, or 3) - rightLeftAssoc is the indicator whether the operator is right or left associative, using the pyparsing-defined constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. 
- parseAction is the parse action to be associated with expressions matching this operator expression (the parse action tuple member may be omitted) - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) Example:: # simple example of four-function arithmetic with ints and variable names integer = pyparsing_common.signed_integer varname = pyparsing_common.identifier arith_expr = infixNotation(integer | varname, [ ('-', 1, opAssoc.RIGHT), (oneOf('* /'), 2, opAssoc.LEFT), (oneOf('+ -'), 2, opAssoc.LEFT), ]) arith_expr.runTests(''' 5+3*6 (5+3)*6 -2--11 ''', fullDump=False) prints:: 5+3*6 [[5, '+', [3, '*', 6]]] (5+3)*6 [[[5, '+', 3], '*', 6]] -2--11 [[['-', 2], '-', ['-', 11]]] """ ret = Forward() lastExpr = baseExpr | ( lpar + ret + rpar ) for i,operDef in enumerate(opList): opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr if arity == 3: if opExpr is None or len(opExpr) != 2: raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") opExpr1, opExpr2 = opExpr thisExpr = Forward().setName(termName) if rightLeftAssoc == opAssoc.LEFT: if arity == 1: matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) else: matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") elif rightLeftAssoc == opAssoc.RIGHT: if arity == 1: # try to avoid LR with this extra test if not isinstance(opExpr, Optional): opExpr = Optional(opExpr) matchExpr = 
FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) else: matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") else: raise ValueError("operator must indicate right or left associativity") if pa: matchExpr.setParseAction( pa ) thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) lastExpr = thisExpr ret <<= lastExpr return ret operatorPrecedence = infixNotation """(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release.""" dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): """ Helper method for defining nested lists enclosed in opening and closing delimiters ("(" and ")" are the default). 
Parameters: - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression - content - expression for items within the nested lists (default=C{None}) - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) If an expression is not provided for the content argument, the nested expression will capture all whitespace-delimited content between delimiters as a list of separate values. Use the C{ignoreExpr} argument to define expressions that may contain opening or closing characters that should not be treated as opening or closing characters for nesting, such as quotedString or a comment expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. The default is L{quotedString}, but if no expressions are to be ignored, then pass C{None} for this argument. Example:: data_type = oneOf("void int short long char float double") decl_data_type = Combine(data_type + Optional(Word('*'))) ident = Word(alphas+'_', alphanums+'_') number = pyparsing_common.number arg = Group(decl_data_type + ident) LPAR,RPAR = map(Suppress, "()") code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) c_function = (decl_data_type("type") + ident("name") + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + code_body("body")) c_function.ignore(cStyleComment) source_code = ''' int is_odd(int x) { return (x%2); } int dec_to_hex(char hchar) { if (hchar >= '0' && hchar <= '9') { return (ord(hchar)-ord('0')); } else { return (10+ord(hchar)-ord('A')); } } ''' for func in c_function.searchString(source_code): print("%(name)s (%(type)s) args: %(args)s" % func) prints:: is_odd (int) args: [['int', 'x']] dec_to_hex (int) args: [['char', 'hchar']] """ if opener == closer: raise ValueError("opening and closing strings cannot be the same") if content is None: if isinstance(opener,basestring) and 
isinstance(closer,basestring): if len(opener) == 1 and len(closer)==1: if ignoreExpr is not None: content = (Combine(OneOrMore(~ignoreExpr + CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) ).setParseAction(lambda t:t[0].strip())) else: content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS ).setParseAction(lambda t:t[0].strip())) else: if ignoreExpr is not None: content = (Combine(OneOrMore(~ignoreExpr + ~Literal(opener) + ~Literal(closer) + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) ).setParseAction(lambda t:t[0].strip())) else: content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) ).setParseAction(lambda t:t[0].strip())) else: raise ValueError("opening and closing arguments must be strings if no content expression is given") ret = Forward() if ignoreExpr is not None: ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) else: ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) ret.setName('nested %s%s expression' % (opener,closer)) return ret def indentedBlock(blockStatementExpr, indentStack, indent=True): """ Helper method for defining space-delimited indentation blocks, such as those used to define block statements in Python source code. Parameters: - blockStatementExpr - expression defining syntax of statement that is repeated within the indented block - indentStack - list created by caller to manage indentation stack (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - indent - boolean indicating whether block must be indented beyond the the current level; set to False for block of left-most statements (default=C{True}) A valid block must contain at least one C{blockStatement}. 
Example:: data = ''' def A(z): A1 B = 100 G = A2 A2 A3 B def BB(a,b,c): BB1 def BBA(): bba1 bba2 bba3 C D def spam(x,y): def eggs(z): pass ''' indentStack = [1] stmt = Forward() identifier = Word(alphas, alphanums) funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") func_body = indentedBlock(stmt, indentStack) funcDef = Group( funcDecl + func_body ) rvalue = Forward() funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") rvalue << (funcCall | identifier | Word(nums)) assignment = Group(identifier + "=" + rvalue) stmt << ( funcDef | assignment | identifier ) module_body = OneOrMore(stmt) parseTree = module_body.parseString(data) parseTree.pprint() prints:: [['def', 'A', ['(', 'z', ')'], ':', [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], 'B', ['def', 'BB', ['(', 'a', 'b', 'c', ')'], ':', [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], 'C', 'D', ['def', 'spam', ['(', 'x', 'y', ')'], ':', [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] """ def checkPeerIndent(s,l,t): if l >= len(s): return curCol = col(l,s) if curCol != indentStack[-1]: if curCol > indentStack[-1]: raise ParseFatalException(s,l,"illegal nesting") raise ParseException(s,l,"not a peer entry") def checkSubIndent(s,l,t): curCol = col(l,s) if curCol > indentStack[-1]: indentStack.append( curCol ) else: raise ParseException(s,l,"not a subentry") def checkUnindent(s,l,t): if l >= len(s): return curCol = col(l,s) if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): raise ParseException(s,l,"not an unindent") indentStack.pop() NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') PEER = Empty().setParseAction(checkPeerIndent).setName('') UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') if indent: smExpr = Group( Optional(NL) + #~ 
FollowedBy(blockStatementExpr) + INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) else: smExpr = Group( Optional(NL) + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) blockStatementExpr.ignore(_bslash + LineEnd()) return smExpr.setName('indented block') alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) _htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) commonHTMLEntity = Regex('&(?P' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") def replaceHTMLEntity(t): """Helper parser action to replace common HTML entities with their special characters""" return _htmlEntityMap.get(t.entity) # it's easy to get these comment structures wrong - they're very common, so may as well make them available cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") "Comment of the form C{/* ... */}" htmlComment = Regex(r"").setName("HTML comment") "Comment of the form C{}" restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") "Comment of the form C{// ... (to end of line)}" cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") "Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" javaStyleComment = cppStyleComment "Same as C{L{cppStyleComment}}" pythonStyleComment = Regex(r"#.*").setName("Python style comment") "Comment of the form C{# ... 
(to end of line)}" _commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + Optional( Word(" \t") + ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") """(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}.""" # some other useful expressions - using lower-case class name since we are really using this as a namespace class pyparsing_common: """ Here are some common low-level expressions that may be useful in jump-starting parser development: - numeric forms (L{integers}, L{reals}, L{scientific notation}) - common L{programming identifiers} - network addresses (L{MAC}, L{IPv4}, L{IPv6}) - ISO8601 L{dates} and L{datetime} - L{UUID} - L{comma-separated list} Parse actions: - C{L{convertToInteger}} - C{L{convertToFloat}} - C{L{convertToDate}} - C{L{convertToDatetime}} - C{L{stripHTMLTags}} - C{L{upcaseTokens}} - C{L{downcaseTokens}} Example:: pyparsing_common.number.runTests(''' # any int or real number, returned as the appropriate type 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.fnumber.runTests(''' # any int or real number, returned as float 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.hex_integer.runTests(''' # hex numbers 100 FF ''') pyparsing_common.fraction.runTests(''' # fractions 1/2 -3/4 ''') pyparsing_common.mixed_integer.runTests(''' # mixed fractions 1 1/2 -3/4 1-3/4 ''') import uuid pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) pyparsing_common.uuid.runTests(''' # uuid 12345678-1234-5678-1234-567812345678 ''') prints:: # any int or real number, returned as the appropriate type 100 [100] -100 [-100] +100 [100] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # any int or real number, returned as float 100 [100.0] -100 [-100.0] +100 
[100.0] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # hex numbers 100 [256] FF [255] # fractions 1/2 [0.5] -3/4 [-0.75] # mixed fractions 1 [1] 1/2 [0.5] -3/4 [-0.75] 1-3/4 [1.75] # uuid 12345678-1234-5678-1234-567812345678 [UUID('12345678-1234-5678-1234-567812345678')] """ convertToInteger = tokenMap(int) """ Parse action for converting parsed integers to Python int """ convertToFloat = tokenMap(float) """ Parse action for converting parsed numbers to Python float """ integer = Word(nums).setName("integer").setParseAction(convertToInteger) """expression that parses an unsigned integer, returns an int""" hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16)) """expression that parses a hexadecimal integer, returns an int""" signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) """expression that parses an integer with optional leading sign, returns an int""" fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") """fractional expression of an integer divided by an integer, returns a float""" fraction.addParseAction(lambda t: t[0]/t[-1]) mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" mixed_integer.addParseAction(sum) real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) """expression that parses a floating point number and returns a float""" sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) """expression that parses a floating point number with optional scientific notation and returns a float""" # streamlining this expression makes the docs nicer-looking number = (sci_real | real | 
signed_integer).streamline() """any numeric expression, returns the corresponding Python type""" fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) """any int or real number, returned as float""" identifier = Word(alphas+'_', alphanums+'_').setName("identifier") """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") "IPv4 address (C{0.0.0.0 - 255.255.255.255})" _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") "IPv6 address (long, short, or mixed form)" mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" @staticmethod def convertToDate(fmt="%Y-%m-%d"): """ Helper to create a parse action for converting parsed date string to Python datetime.date Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) Example:: date_expr = pyparsing_common.iso8601_date.copy() date_expr.setParseAction(pyparsing_common.convertToDate()) print(date_expr.parseString("1999-12-31")) prints:: [datetime.date(1999, 12, 31)] """ def cvt_fn(s,l,t): try: return datetime.strptime(t[0], fmt).date() except ValueError as ve: raise ParseException(s, l, str(ve)) return cvt_fn @staticmethod def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): """ Helper to create a parse action for converting parsed datetime string to Python datetime.datetime Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) Example:: dt_expr = pyparsing_common.iso8601_datetime.copy() dt_expr.setParseAction(pyparsing_common.convertToDatetime()) print(dt_expr.parseString("1999-12-31T23:59:59.999")) prints:: [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] """ def cvt_fn(s,l,t): try: return datetime.strptime(t[0], fmt) except ValueError as ve: raise ParseException(s, l, str(ve)) return cvt_fn iso8601_date = Regex(r'(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?').setName("ISO8601 date") "ISO8601 date (C{yyyy-mm-dd})" iso8601_datetime = Regex(r'(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() @staticmethod def stripHTMLTags(s, l, tokens): """ Parse action to remove HTML tags from web page HTML source Example:: # strip HTML links from normal text text = 
'More info at the
pyparsing wiki page' td,td_end = makeHTMLTags("TD") table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' """ return pyparsing_common._html_stripper.transformString(tokens[0]) _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') + Optional( White(" \t") ) ) ).streamline().setName("commaItem") comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) """Parse action to convert tokens to upper case.""" downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) """Parse action to convert tokens to lower case.""" if __name__ == "__main__": selectToken = CaselessLiteral("select") fromToken = CaselessLiteral("from") ident = Word(alphas, alphanums + "_$") columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) columnNameList = Group(delimitedList(columnName)).setName("columns") columnSpec = ('*' | columnNameList) tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) tableNameList = Group(delimitedList(tableName)).setName("tables") simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") # demo runTests method, including embedded comments in test string simpleSQL.runTests(""" # '*' as column list and dotted table name select * from SYS.XYZZY # caseless match on "SELECT", and casts back to "select" SELECT * from XYZZY, ABC # list of column names, and mixed case SELECT keyword Select AA,BB,CC from Sys.dual # multiple tables Select A, B, C from Sys.dual, Table2 # invalid SELECT keyword - should fail Xelect A, B, C from Sys.dual # incomplete command - should fail Select # 
invalid column name - should fail Select ^^^ frox Sys.dual """) pyparsing_common.number.runTests(""" 100 -100 +100 3.14159 6.02e23 1e-12 """) # any int or real number, returned as float pyparsing_common.fnumber.runTests(""" 100 -100 +100 3.14159 6.02e23 1e-12 """) pyparsing_common.hex_integer.runTests(""" 100 FF """) import uuid pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) pyparsing_common.uuid.runTests(""" 12345678-1234-5678-1234-567812345678 """) PKtge[XMZuusix.pynu["""Utilities for writing code that runs on Python 2 and 3""" # Copyright (c) 2010-2015 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import absolute_import import functools import itertools import operator import sys import types __author__ = "Benjamin Peterson " __version__ = "1.10.0" # Useful for very coarse version differentiation. 
PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 PY34 = sys.version_info[0:2] >= (3, 4) if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # Invokes __set__. try: # This is a bit ugly, but it avoids running this again by # removing this descriptor. 
delattr(obj.__class__, self.name) except AttributeError: pass return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) def __getattr__(self, attr): _module = self._resolve() value = getattr(_module, attr) setattr(self, attr, value) return value class _LazyModule(types.ModuleType): def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ def __dir__(self): attrs = ["__doc__", "__name__"] attrs += [attr.name for attr in self._moved_attributes] return attrs # Subclasses should override this _moved_attributes = [] class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _SixMetaPathImporter(object): """ A meta path importer to import six.moves and its submodules. This class implements a PEP302 finder and loader. It should be compatible with Python 2.5 and all existing versions of Python3 """ def __init__(self, six_module_name): self.name = six_module_name self.known_modules = {} def _add_module(self, mod, *fullnames): for fullname in fullnames: self.known_modules[self.name + "." + fullname] = mod def _get_module(self, fullname): return self.known_modules[self.name + "." 
+ fullname] def find_module(self, fullname, path=None): if fullname in self.known_modules: return self return None def __get_module(self, fullname): try: return self.known_modules[fullname] except KeyError: raise ImportError("This loader does not know module " + fullname) def load_module(self, fullname): try: # in case of a reload return sys.modules[fullname] except KeyError: pass mod = self.__get_module(fullname) if isinstance(mod, MovedModule): mod = mod._resolve() else: mod.__loader__ = self sys.modules[fullname] = mod return mod def is_package(self, fullname): """ Return true, if the named module is a package. We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__") def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None get_source = get_code # same as get_code _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("UserDict", 
"UserDict", "collections"), MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("_thread", "thread", "_thread"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_ttk", "ttk", 
"tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), ] # Add windows specific modules. if sys.platform == "win32": _moved_attributes += [ MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) if isinstance(attr, MovedModule): _importer._add_module(attr, "moves." 
+ attr.name) del attr _MovedItems._moved_attributes = _moved_attributes moves = _MovedItems(__name__ + ".moves") _importer._add_module(moves, "moves") class Module_six_moves_urllib_parse(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), MovedAttribute("urljoin", "urlparse", "urllib.parse"), MovedAttribute("urlparse", "urlparse", "urllib.parse"), MovedAttribute("urlsplit", "urlparse", "urllib.parse"), MovedAttribute("urlunparse", "urlparse", "urllib.parse"), MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), MovedAttribute("quote", "urllib", "urllib.parse"), MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), MovedAttribute("uses_query", "urlparse", "urllib.parse"), MovedAttribute("uses_relative", "urlparse", "urllib.parse"), ] for attr in _urllib_parse_moved_attributes: setattr(Module_six_moves_urllib_parse, attr.name, attr) del attr Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes _importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), "moves.urllib_parse", "moves.urllib.parse") class Module_six_moves_urllib_error(_LazyModule): """Lazy loading of 
moved objects in six.moves.urllib_error""" _urllib_error_moved_attributes = [ MovedAttribute("URLError", "urllib2", "urllib.error"), MovedAttribute("HTTPError", "urllib2", "urllib.error"), MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), ] for attr in _urllib_error_moved_attributes: setattr(Module_six_moves_urllib_error, attr.name, attr) del attr Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes _importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), "moves.urllib_error", "moves.urllib.error") class Module_six_moves_urllib_request(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_request""" _urllib_request_moved_attributes = [ MovedAttribute("urlopen", "urllib2", "urllib.request"), MovedAttribute("install_opener", "urllib2", "urllib.request"), MovedAttribute("build_opener", "urllib2", "urllib.request"), MovedAttribute("pathname2url", "urllib", "urllib.request"), MovedAttribute("url2pathname", "urllib", "urllib.request"), MovedAttribute("getproxies", "urllib", "urllib.request"), MovedAttribute("Request", "urllib2", "urllib.request"), MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), MovedAttribute("BaseHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), 
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), MovedAttribute("FileHandler", "urllib2", "urllib.request"), MovedAttribute("FTPHandler", "urllib2", "urllib.request"), MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), MovedAttribute("urlretrieve", "urllib", "urllib.request"), MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), "moves.urllib_request", "moves.urllib.request") class Module_six_moves_urllib_response(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_response""" _urllib_response_moved_attributes = [ MovedAttribute("addbase", "urllib", "urllib.response"), MovedAttribute("addclosehook", "urllib", "urllib.response"), MovedAttribute("addinfo", "urllib", "urllib.response"), MovedAttribute("addinfourl", "urllib", "urllib.response"), ] for attr in _urllib_response_moved_attributes: setattr(Module_six_moves_urllib_response, attr.name, attr) del attr Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes _importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), "moves.urllib_response", "moves.urllib.response") class 
Module_six_moves_urllib_robotparser(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_robotparser""" _urllib_robotparser_moved_attributes = [ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes _importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), "moves.urllib_robotparser", "moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") request = _importer._get_module("moves.urllib_request") response = _importer._get_module("moves.urllib_response") robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): return ['parse', 'error', 'request', 'response', 'robotparser'] _importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib") def add_move(move): """Add an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_closure = "__closure__" _func_code = "__code__" _func_defaults = "__defaults__" _func_globals = "__globals__" else: _meth_func = "im_func" _meth_self = "im_self" _func_closure = "func_closure" _func_code = "func_code" _func_defaults = "func_defaults" _func_globals = "func_globals" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator try: callable = 
callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: def get_unbound_function(unbound): return unbound create_bound_method = types.MethodType def create_unbound_method(func, cls): return func Iterator = object else: def get_unbound_function(unbound): return unbound.im_func def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) def create_unbound_method(func, cls): return types.MethodType(func, None, cls) class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_closure = operator.attrgetter(_func_closure) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) get_function_globals = operator.attrgetter(_func_globals) if PY3: def iterkeys(d, **kw): return iter(d.keys(**kw)) def itervalues(d, **kw): return iter(d.values(**kw)) def iteritems(d, **kw): return iter(d.items(**kw)) def iterlists(d, **kw): return iter(d.lists(**kw)) viewkeys = operator.methodcaller("keys") viewvalues = operator.methodcaller("values") viewitems = operator.methodcaller("items") else: def iterkeys(d, **kw): return d.iterkeys(**kw) def itervalues(d, **kw): return d.itervalues(**kw) def iteritems(d, **kw): return d.iteritems(**kw) def iterlists(d, **kw): return d.iterlists(**kw) viewkeys = operator.methodcaller("viewkeys") viewvalues = operator.methodcaller("viewvalues") viewitems = operator.methodcaller("viewitems") _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") _add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") _add_doc(iterlists, "Return an iterator over the 
(key, [values]) pairs of a dictionary.") if PY3: def b(s): return s.encode("latin-1") def u(s): return s unichr = chr import struct int2byte = struct.Struct(">B").pack del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io StringIO = io.StringIO BytesIO = io.BytesIO _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" else: _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" else: def b(s): return s # Workaround for standalone backslash def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") unichr = unichr int2byte = chr def byte2int(bs): return ord(bs[0]) def indexbytes(buf, i): return ord(buf[i]) iterbytes = functools.partial(itertools.imap, ord) import StringIO StringIO = BytesIO = StringIO.StringIO _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") def assertCountEqual(self, *args, **kwargs): return getattr(self, _assertCountEqual)(*args, **kwargs) def assertRaisesRegex(self, *args, **kwargs): return getattr(self, _assertRaisesRegex)(*args, **kwargs) def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs) if PY3: exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): if value is None: value = tp() if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value else: def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: frame = sys._getframe(1) _globs_ = frame.f_globals if _locs_ is None: _locs_ = frame.f_locals del frame elif _locs_ is None: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) if sys.version_info[:2] == (3, 2): exec_("""def raise_from(value, from_value): if 
from_value is None: raise value raise value from from_value """) elif sys.version_info[:2] > (3, 2): exec_("""def raise_from(value, from_value): raise value from from_value """) else: def raise_from(value, from_value): raise value print_ = getattr(moves.builtins, "print", None) if print_ is None: def print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. if (isinstance(fp, file) and isinstance(data, unicode) and fp.encoding is not None): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) if sys.version_info[:2] < (3, 3): _print = print_ def print_(*args, **kwargs): fp = kwargs.get("file", sys.stdout) flush = kwargs.pop("flush", False) _print(*args, **kwargs) if flush and fp is not None: fp.flush() _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): def wrapper(f): f = functools.wraps(wrapped, assigned, 
updated)(f) f.__wrapped__ = wrapped return f return wrapper else: wraps = functools.wraps def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, 'temporary_class', (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. """ if PY2: if '__str__' not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesn't define __str__()." % klass.__name__) klass.__unicode__ = klass.__str__ klass.__str__ = lambda self: self.__unicode__().encode('utf-8') return klass # Complete the moves implementation. # This code is at the end of this module to speed up module loading. # Turn this module into a package. __path__ = [] # required for PEP 302 and PEP 451 __package__ = __name__ # see PEP 366 @ReservedAssignment if globals().get("__spec__") is not None: __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable # Remove other six meta path importers, since they cause problems. This can # happen if six is removed from sys.modules and then reloaded. 
(Setuptools does # this for some reason.) if sys.meta_path: for i, importer in enumerate(sys.meta_path): # Here's some real nastiness: Another "instance" of the six module might # be floating around. Therefore, we can't use isinstance() to check for # the six meta path importer, since the other six instance will have # inserted an importer with different class. if (type(importer).__name__ == "_SixMetaPathImporter" and importer.name == __name__): del sys.meta_path[i] break del i, importer # Finally, add the importer to the meta path import hook. sys.meta_path.append(_importer) PKtge[Z_Z___pycache__/six.cpython-36.pycnu[3 vhuI@srdZddlmZddlZddlZddlZddlZddlZdZdZ ej ddkZ ej ddkZ ej dddzkZ e refZefZefZeZeZejZnefZeefZeejfZeZeZejjd red|ZnLGd d d eZ ye!e Wn e"k r ed~ZYn XedZ[ ddZ#ddZ$GdddeZ%Gddde%Z&Gdddej'Z(Gddde%Z)GdddeZ*e*e+Z,Gddde(Z-e)ddd d!e)d"d#d$d%d"e)d&d#d#d'd&e)d(d)d$d*d(e)d+d)d,e)d-d#d$d.d-e)d/d0d0d1d/e)d2d0d0d/d2e)d3d)d$d4d3e)d5d)e rd6nd7d8e)d9d)d:e)d;de)d!d!d e)d?d?d@e)dAdAd@e)dBdBd@e)d4d)d$d4d3e)dCd#d$dDdCe)dEd#d#dFdEe&d$d)e&dGdHe&dIdJe&dKdLdMe&dNdOdNe&dPdQdRe&dSdTdUe&dVdWdXe&dYdZd[e&d\d]d^e&d_d`dae&dbdcdde&dedfdge&dhdidje&dkdkdle&dmdmdle&dndndle&dododpe&dqdre&dsdte&dudve&dwdxdwe&dydze&d{d|d}e&d~dde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&de+dde&de+dde&de+de+de&ddde&ddde&dddg>Z.ejdkrZe.e&ddg7Z.x:e.D]2Z/e0e-e/j1e/e2e/e&r`e,j3e/de/j1q`W[/e.e-_.e-e+dZ4e,j3e4dGddde(Z5e)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)d>dde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)dddgZ6xe6D]Z/e0e5e/j1e/qW[/e6e5_.e,j3e5e+dddӃGddՄde(Z7e)ddde)ddde)dddgZ8xe8D]Z/e0e7e/j1e/q$W[/e8e7_.e,j3e7e+ddd܃Gddބde(Z9e)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)dddg!Z:xe:D]Z/e0e9e/j1e/qW[/e:e9_.e,j3e9e+dddGddde(Z;e)ddde)ddde)d dde)d ddgZxe>D]Z/e0e=e/j1e/qW[/e>e=_.e,j3e=e+dddGdddej'Z?e,j3e?e+ddddZ@ddZAe rjdZBdZCdZDdZEdZFd 
ZGn$d!ZBd"ZCd#ZDd$ZEd%ZFd&ZGyeHZIWn"eJk rd'd(ZIYnXeIZHyeKZKWn"eJk rd)d*ZKYnXe rd+d,ZLejMZNd-d.ZOeZPn>d/d,ZLd0d1ZNd2d.ZOGd3d4d4eZPeKZKe#eLd5ejQeBZRejQeCZSejQeDZTejQeEZUejQeFZVejQeGZWe rd6d7ZXd8d9ZYd:d;ZZd<d=Z[ej\d>Z]ej\d?Z^ej\d@Z_nTdAd7ZXdBd9ZYdCd;ZZdDd=Z[ej\dEZ]ej\dFZ^ej\dGZ_e#eXdHe#eYdIe#eZdJe#e[dKe rdLdMZ`dNdOZaebZcddldZdedjedPjfZg[dejhdZiejjZkelZmddlnZnenjoZoenjpZpdQZqej d d k rdRZrdSZsn dTZrdUZsnjdVdMZ`dWdOZaecZcebZgdXdYZidZd[ZkejtejuevZmddloZoeojoZoZpd\ZqdRZrdSZse#e`d]e#ead^d_dQZwd`dTZxdadUZye reze4j{dbZ|ddcddZ}nddedfZ|e|dgej dddk re|dhn.ej dddk r8e|din djdkZ~eze4j{dldZedk rjdmdnZej dddk reZdodnZe#e}dpej dddk rejejfdqdrZnejZdsdtZdudvZdwdxZgZe+Zejdydk rge_ejrbx>eejD]0\ZZeej+dkr*ej1e+kr*eje=Pq*W[[ejje,dS(z6Utilities for writing code that runs on Python 2 and 3)absolute_importNz'Benjamin Peterson z1.10.0javac@seZdZddZdS)XcCsdS)Nrrl)selfr r /usr/lib/python3.6/six.py__len__>sz X.__len__N)__name__ __module__ __qualname__r r r r r r <sr ?cCs ||_dS)z Add documentation to a function.N)__doc__)funcdocr r r _add_docKsrcCst|tj|S)z7Import module, returning the module after the last dot.) 
__import__sysmodules)namer r r _import_modulePsrc@seZdZddZddZdS) _LazyDescrcCs ||_dS)N)r)r rr r r __init__Xsz_LazyDescr.__init__c CsB|j}t||j|yt|j|jWntk r<YnX|S)N)_resolvesetattrrdelattr __class__AttributeError)r objtpresultr r r __get__[sz_LazyDescr.__get__N)rrrrr%r r r r rVsrcs.eZdZdfdd ZddZddZZS) MovedModuleNcs2tt|j|tr(|dkr |}||_n||_dS)N)superr&rPY3mod)r roldnew)r r r ris zMovedModule.__init__cCs t|jS)N)rr))r r r r rrszMovedModule._resolvecCs"|j}t||}t||||S)N)rgetattrr)r attr_modulevaluer r r __getattr__us  zMovedModule.__getattr__)N)rrrrrr0 __classcell__r r )r r r&gs r&cs(eZdZfddZddZgZZS) _LazyModulecstt|j||jj|_dS)N)r'r2rr r)r r)r r r r~sz_LazyModule.__init__cCs ddg}|dd|jD7}|S)NrrcSsg|] }|jqSr )r).0r-r r r sz'_LazyModule.__dir__..)_moved_attributes)r Zattrsr r r __dir__sz_LazyModule.__dir__)rrrrr6r5r1r r )r r r2|s r2cs&eZdZdfdd ZddZZS)MovedAttributeNcsdtt|j|trH|dkr |}||_|dkr@|dkr<|}n|}||_n||_|dkrZ|}||_dS)N)r'r7rr(r)r-)r rZold_modZnew_modZold_attrZnew_attr)r r r rszMovedAttribute.__init__cCst|j}t||jS)N)rr)r,r-)r moduler r r rs zMovedAttribute._resolve)NN)rrrrrr1r r )r r r7sr7c@sVeZdZdZddZddZddZdd d Zd d Zd dZ ddZ ddZ e Z dS)_SixMetaPathImporterz A meta path importer to import six.moves and its submodules. This class implements a PEP302 finder and loader. 
It should be compatible with Python 2.5 and all existing versions of Python3 cCs||_i|_dS)N)r known_modules)r Zsix_module_namer r r rsz_SixMetaPathImporter.__init__cGs&x |D]}||j|jd|<qWdS)N.)r:r)r r)Z fullnamesfullnamer r r _add_modules z _SixMetaPathImporter._add_modulecCs|j|jd|S)Nr;)r:r)r r<r r r _get_modulesz _SixMetaPathImporter._get_moduleNcCs||jkr|SdS)N)r:)r r<pathr r r find_modules z _SixMetaPathImporter.find_modulec Cs0y |j|Stk r*td|YnXdS)Nz!This loader does not know module )r:KeyError ImportError)r r<r r r Z __get_modules z!_SixMetaPathImporter.__get_modulec CsRy tj|Stk rYnX|j|}t|tr>|j}n||_|tj|<|S)N)rrrA _SixMetaPathImporter__get_module isinstancer&r __loader__)r r<r)r r r load_modules     z _SixMetaPathImporter.load_modulecCst|j|dS)z Return true, if the named module is a package. We need this method to get correct spec objects with Python 3.4 (see PEP451) __path__)hasattrrC)r r<r r r is_packagesz_SixMetaPathImporter.is_packagecCs|j|dS)z;Return None Required, if is_package is implementedN)rC)r r<r r r get_codes z_SixMetaPathImporter.get_code)N) rrrrrr=r>r@rCrFrIrJ get_sourcer r r r r9s  r9c@seZdZdZgZdS) _MovedItemszLazy loading of moved objectsN)rrrrrGr r r r rLsrLZ cStringIOioStringIOfilter itertoolsbuiltinsZifilter filterfalseZ ifilterfalseinputZ __builtin__Z raw_inputinternrmapimapgetcwdosZgetcwdugetcwdbrangeZxrangeZ reload_module importlibZimpreloadreduce functoolsZ shlex_quoteZpipesZshlexZquoteUserDict collectionsUserList UserStringzipZizip zip_longestZ izip_longestZ configparserZ ConfigParsercopyregZcopy_regZdbm_gnuZgdbmzdbm.gnuZ _dummy_threadZ dummy_threadZhttp_cookiejarZ cookielibzhttp.cookiejarZ http_cookiesZCookiez http.cookiesZ html_entitiesZhtmlentitydefsz html.entitiesZ html_parserZ HTMLParserz html.parserZ http_clientZhttplibz 
http.clientZemail_mime_multipartzemail.MIMEMultipartzemail.mime.multipartZemail_mime_nonmultipartzemail.MIMENonMultipartzemail.mime.nonmultipartZemail_mime_textzemail.MIMETextzemail.mime.textZemail_mime_basezemail.MIMEBasezemail.mime.baseZBaseHTTPServerz http.serverZ CGIHTTPServerZSimpleHTTPServerZcPicklepickleZqueueZQueuereprlibreprZ socketserverZ SocketServer_threadZthreadZtkinterZTkinterZtkinter_dialogZDialogztkinter.dialogZtkinter_filedialogZ FileDialogztkinter.filedialogZtkinter_scrolledtextZ ScrolledTextztkinter.scrolledtextZtkinter_simpledialogZ SimpleDialogztkinter.simpledialogZ tkinter_tixZTixz tkinter.tixZ tkinter_ttkZttkz tkinter.ttkZtkinter_constantsZ Tkconstantsztkinter.constantsZ tkinter_dndZTkdndz tkinter.dndZtkinter_colorchooserZtkColorChooserztkinter.colorchooserZtkinter_commondialogZtkCommonDialogztkinter.commondialogZtkinter_tkfiledialogZ tkFileDialogZ tkinter_fontZtkFontz tkinter.fontZtkinter_messageboxZ tkMessageBoxztkinter.messageboxZtkinter_tksimpledialogZtkSimpleDialogZ urllib_parsez.moves.urllib_parsez urllib.parseZ urllib_errorz.moves.urllib_errorz urllib.errorZurllibz .moves.urllibZurllib_robotparser robotparserzurllib.robotparserZ xmlrpc_clientZ xmlrpclibz xmlrpc.clientZ xmlrpc_serverZSimpleXMLRPCServerz xmlrpc.serverZwin32winreg_winregzmoves.z.movesmovesc@seZdZdZdS)Module_six_moves_urllib_parsez7Lazy loading of moved objects in six.moves.urllib_parseN)rrrrr r r r rn@srnZ ParseResultZurlparseZ SplitResultZparse_qsZ parse_qslZ urldefragZurljoinZurlsplitZ urlunparseZ urlunsplitZ quote_plusZunquoteZ unquote_plusZ urlencodeZ splitqueryZsplittagZ splituserZ uses_fragmentZ uses_netlocZ uses_paramsZ uses_queryZ uses_relativezmoves.urllib_parsezmoves.urllib.parsec@seZdZdZdS)Module_six_moves_urllib_errorz7Lazy loading of moved objects in six.moves.urllib_errorN)rrrrr r r r rohsroZURLErrorZurllib2Z HTTPErrorZContentTooShortErrorz.moves.urllib.errorzmoves.urllib_errorzmoves.urllib.errorc@seZdZdZdS)Module_six_moves_urllib_requestz9Lazy loading of 
moved objects in six.moves.urllib_requestN)rrrrr r r r rp|srpZurlopenzurllib.requestZinstall_openerZ build_openerZ pathname2urlZ url2pathnameZ getproxiesZRequestZOpenerDirectorZHTTPDefaultErrorHandlerZHTTPRedirectHandlerZHTTPCookieProcessorZ ProxyHandlerZ BaseHandlerZHTTPPasswordMgrZHTTPPasswordMgrWithDefaultRealmZAbstractBasicAuthHandlerZHTTPBasicAuthHandlerZProxyBasicAuthHandlerZAbstractDigestAuthHandlerZHTTPDigestAuthHandlerZProxyDigestAuthHandlerZ HTTPHandlerZ HTTPSHandlerZ FileHandlerZ FTPHandlerZCacheFTPHandlerZUnknownHandlerZHTTPErrorProcessorZ urlretrieveZ urlcleanupZ URLopenerZFancyURLopenerZ proxy_bypassz.moves.urllib.requestzmoves.urllib_requestzmoves.urllib.requestc@seZdZdZdS) Module_six_moves_urllib_responsez:Lazy loading of moved objects in six.moves.urllib_responseN)rrrrr r r r rqsrqZaddbasezurllib.responseZ addclosehookZaddinfoZ addinfourlz.moves.urllib.responsezmoves.urllib_responsezmoves.urllib.responsec@seZdZdZdS)#Module_six_moves_urllib_robotparserz=Lazy loading of moved objects in six.moves.urllib_robotparserN)rrrrr r r r rrsrrZRobotFileParserz.moves.urllib.robotparserzmoves.urllib_robotparserzmoves.urllib.robotparserc@sNeZdZdZgZejdZejdZejdZ ejdZ ejdZ ddZ d S) Module_six_moves_urllibzICreate a six.moves.urllib namespace that resembles the Python 3 namespacezmoves.urllib_parsezmoves.urllib_errorzmoves.urllib_requestzmoves.urllib_responsezmoves.urllib_robotparsercCsdddddgS)Nparseerrorrequestresponserjr )r r r r r6szModule_six_moves_urllib.__dir__N) rrrrrG _importerr>rtrurvrwrjr6r r r r rss     rsz moves.urllibcCstt|j|dS)zAdd an item to six.moves.N)rrLr)Zmover r r add_movesrycCsXytt|WnDtk rRy tj|=Wn"tk rLtd|fYnXYnXdS)zRemove item from six.moves.zno such move, %rN)rrLr!rm__dict__rA)rr r r remove_moves r{__func____self__ __closure____code__ __defaults__ __globals__im_funcZim_selfZ func_closureZ func_codeZ func_defaultsZ func_globalscCs|jS)N)next)itr r r advance_iterator srcCstddt|jDS)Ncss|]}d|jkVqdS)__call__N)rz)r3klassr r r 
szcallable..)anytype__mro__)r"r r r callablesrcCs|S)Nr )unboundr r r get_unbound_functionsrcCs|S)Nr )rclsr r r create_unbound_methodsrcCs|jS)N)r)rr r r r"scCstj|||jS)N)types MethodTyper )rr"r r r create_bound_method%srcCstj|d|S)N)rr)rrr r r r(sc@seZdZddZdS)IteratorcCst|j|S)N)r__next__)r r r r r-sz Iterator.nextN)rrrrr r r r r+srz3Get the function out of a possibly unbound functioncKst|jf|S)N)iterkeys)dkwr r r iterkeys>srcKst|jf|S)N)rvalues)rrr r r itervaluesAsrcKst|jf|S)N)ritems)rrr r r iteritemsDsrcKst|jf|S)N)rZlists)rrr r r iterlistsGsrrrrcKs |jf|S)N)r)rrr r r rPscKs |jf|S)N)r)rrr r r rSscKs |jf|S)N)r)rrr r r rVscKs |jf|S)N)r)rrr r r rYsviewkeys viewvalues viewitemsz1Return an iterator over the keys of a dictionary.z3Return an iterator over the values of a dictionary.z?Return an iterator over the (key, value) pairs of a dictionary.zBReturn an iterator over the (key, [values]) pairs of a dictionary.cCs |jdS)Nzlatin-1)encode)sr r r bksrcCs|S)Nr )rr r r unsrz>BassertCountEqualZassertRaisesRegexpZassertRegexpMatchesassertRaisesRegex assertRegexcCs|S)Nr )rr r r rscCst|jdddS)Nz\\z\\\\Zunicode_escape)unicodereplace)rr r r rscCs t|dS)Nr)ord)Zbsr r r byte2intsrcCs t||S)N)r)Zbufir r r indexbytessrZassertItemsEqualz Byte literalz Text literalcOst|t||S)N)r,_assertCountEqual)r argskwargsr r r rscOst|t||S)N)r,_assertRaisesRegex)r rrr r r rscOst|t||S)N)r, _assertRegex)r rrr r r rsexeccCs*|dkr|}|j|k r"|j||dS)N) __traceback__with_traceback)r#r/tbr r r reraises   rcCsB|dkr*tjd}|j}|dkr&|j}~n |dkr6|}tddS)zExecute code in a namespace.Nrzexec _code_ in _globs_, _locs_)r _getframe f_globalsf_localsr)Z_code_Z_globs_Z_locs_framer r r exec_s rz9def reraise(tp, value, tb=None): raise tp, value, tb zrdef raise_from(value, from_value): if from_value is None: raise value raise value from from_value zCdef raise_from(value, from_value): raise value from from_value cCs|dS)Nr )r/Z from_valuer r r raise_fromsrprintc s6|jdtjdkrdSfdd}d}|jdd}|dk r`t|trNd}nt|ts`td|jd d}|dk rt|trd}nt|tstd |rtd 
|sx|D]}t|trd}PqW|rtd }td }nd }d }|dkr|}|dkr|}x,t|D] \} }| r||||qW||dS)z4The new-style print function for Python 2.4 and 2.5.fileNcsdt|tst|}ttrVt|trVjdk rVtdd}|dkrHd}|jj|}j|dS)Nerrorsstrict) rD basestringstrrrencodingr,rwrite)datar)fpr r rs     zprint_..writeFsepTzsep must be None or a stringendzend must be None or a stringz$invalid keyword arguments to print()  )poprstdoutrDrr TypeError enumerate) rrrZ want_unicoderrargnewlineZspacerr )rr print_sL           rcOs<|jdtj}|jdd}t|||r8|dk r8|jdS)NrflushF)getrrr_printr)rrrrr r r r s    zReraise an exception.csfdd}|S)Ncstj|}|_|S)N)r^wraps __wrapped__)f)assignedupdatedwrappedr r wrapperszwraps..wrapperr )rrrrr )rrrr rsrcs&Gfddd}tj|dfiS)z%Create a base class with a metaclass.cseZdZfddZdS)z!with_metaclass..metaclasscs ||S)Nr )rrZ this_basesr)basesmetar r __new__'sz)with_metaclass..metaclass.__new__N)rrrrr )rrr r metaclass%srZtemporary_class)rr)rrrr )rrr with_metaclass srcsfdd}|S)z6Class decorator for creating a class with a metaclass.csl|jj}|jd}|dk rDt|tr,|g}x|D]}|j|q2W|jdd|jdd|j|j|S)N __slots__rz __weakref__)rzcopyrrDrrr __bases__)rZ orig_varsslotsZ slots_var)rr r r.s      zadd_metaclass..wrapperr )rrr )rr add_metaclass,s rcCs2tr.d|jkrtd|j|j|_dd|_|S)a A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. 
__str__zY@python_2_unicode_compatible cannot be applied to %s because it doesn't define __str__().cSs|jjdS)Nzutf-8) __unicode__r)r r r r Jsz-python_2_unicode_compatible..)PY2rz ValueErrorrrr)rr r r python_2_unicode_compatible<s   r__spec__)rrlilill)N)NN)rr)rr)rr)rr)rZ __future__rr^rPoperatorrr __author__ __version__ version_inforr(ZPY34rZ string_typesintZ integer_typesrZ class_typesZ text_typebytesZ binary_typemaxsizeZMAXSIZErZlongZ ClassTyperplatform startswithobjectr len OverflowErrorrrrr& ModuleTyper2r7r9rrxrLr5r-rrrDr=rmrnZ_urllib_parse_moved_attributesroZ_urllib_error_moved_attributesrpZ _urllib_request_moved_attributesrqZ!_urllib_response_moved_attributesrrZ$_urllib_robotparser_moved_attributesrsryr{Z _meth_funcZ _meth_selfZ _func_closureZ _func_codeZ_func_defaultsZ _func_globalsrr NameErrorrrrrrr attrgetterZget_method_functionZget_method_selfZget_function_closureZget_function_codeZget_function_defaultsZget_function_globalsrrrr methodcallerrrrrrchrZunichrstructStructpackZint2byte itemgetterrgetitemrrZ iterbytesrMrNBytesIOrrrpartialrVrrrrr,rQrrrrrWRAPPER_ASSIGNMENTSWRAPPER_UPDATESrrrrrG __package__globalsrrsubmodule_search_locations meta_pathrrZimporterappendr r r r s     >                                                                                                                                                          5     PKtge[-Kqq#__pycache__/__init__.cpython-36.pycnu[3 vh@sdS)Nrrr/usr/lib/python3.6/__init__.pysPKtge[-Kqq)__pycache__/__init__.cpython-36.opt-1.pycnu[3 vh@sdS)Nrrr/usr/lib/python3.6/__init__.pysPKtge[Z_Z_$__pycache__/six.cpython-36.opt-1.pycnu[3 vhuI@srdZddlmZddlZddlZddlZddlZddlZdZdZ ej ddkZ ej ddkZ ej dddzkZ e refZefZefZeZeZejZnefZeefZeejfZeZeZejjd red|ZnLGd d d eZ ye!e Wn e"k r ed~ZYn XedZ[ ddZ#ddZ$GdddeZ%Gddde%Z&Gdddej'Z(Gddde%Z)GdddeZ*e*e+Z,Gddde(Z-e)ddd d!e)d"d#d$d%d"e)d&d#d#d'd&e)d(d)d$d*d(e)d+d)d,e)d-d#d$d.d-e)d/d0d0d1d/e)d2d0d0d/d2e)d3d)d$d4d3e)d5d)e rd6nd7d8e)d9d)d:e)d;de)d!d!d 
e)d?d?d@e)dAdAd@e)dBdBd@e)d4d)d$d4d3e)dCd#d$dDdCe)dEd#d#dFdEe&d$d)e&dGdHe&dIdJe&dKdLdMe&dNdOdNe&dPdQdRe&dSdTdUe&dVdWdXe&dYdZd[e&d\d]d^e&d_d`dae&dbdcdde&dedfdge&dhdidje&dkdkdle&dmdmdle&dndndle&dododpe&dqdre&dsdte&dudve&dwdxdwe&dydze&d{d|d}e&d~dde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&de+dde&de+dde&de+de+de&ddde&ddde&dddg>Z.ejdkrZe.e&ddg7Z.x:e.D]2Z/e0e-e/j1e/e2e/e&r`e,j3e/de/j1q`W[/e.e-_.e-e+dZ4e,j3e4dGddde(Z5e)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)d>dde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)dddgZ6xe6D]Z/e0e5e/j1e/qW[/e6e5_.e,j3e5e+dddӃGddՄde(Z7e)ddde)ddde)dddgZ8xe8D]Z/e0e7e/j1e/q$W[/e8e7_.e,j3e7e+ddd܃Gddބde(Z9e)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)dddg!Z:xe:D]Z/e0e9e/j1e/qW[/e:e9_.e,j3e9e+dddGddde(Z;e)ddde)ddde)d dde)d ddgZxe>D]Z/e0e=e/j1e/qW[/e>e=_.e,j3e=e+dddGdddej'Z?e,j3e?e+ddddZ@ddZAe rjdZBdZCdZDdZEdZFd ZGn$d!ZBd"ZCd#ZDd$ZEd%ZFd&ZGyeHZIWn"eJk rd'd(ZIYnXeIZHyeKZKWn"eJk rd)d*ZKYnXe rd+d,ZLejMZNd-d.ZOeZPn>d/d,ZLd0d1ZNd2d.ZOGd3d4d4eZPeKZKe#eLd5ejQeBZRejQeCZSejQeDZTejQeEZUejQeFZVejQeGZWe rd6d7ZXd8d9ZYd:d;ZZd<d=Z[ej\d>Z]ej\d?Z^ej\d@Z_nTdAd7ZXdBd9ZYdCd;ZZdDd=Z[ej\dEZ]ej\dFZ^ej\dGZ_e#eXdHe#eYdIe#eZdJe#e[dKe rdLdMZ`dNdOZaebZcddldZdedjedPjfZg[dejhdZiejjZkelZmddlnZnenjoZoenjpZpdQZqej d d k rdRZrdSZsn dTZrdUZsnjdVdMZ`dWdOZaecZcebZgdXdYZidZd[ZkejtejuevZmddloZoeojoZoZpd\ZqdRZrdSZse#e`d]e#ead^d_dQZwd`dTZxdadUZye reze4j{dbZ|ddcddZ}nddedfZ|e|dgej dddk re|dhn.ej dddk r8e|din djdkZ~eze4j{dldZedk rjdmdnZej dddk reZdodnZe#e}dpej dddk rejejfdqdrZnejZdsdtZdudvZdwdxZgZe+Zejdydk rge_ejrbx>eejD]0\ZZeej+dkr*ej1e+kr*eje=Pq*W[[ejje,dS(z6Utilities for writing code that runs on Python 2 and 3)absolute_importNz'Benjamin Peterson z1.10.0javac@seZdZddZdS)XcCsdS)Nrrl)selfr r /usr/lib/python3.6/six.py__len__>sz X.__len__N)__name__ __module__ __qualname__r r r r r r <sr ?cCs ||_dS)z Add documentation to a 
function.N)__doc__)funcdocr r r _add_docKsrcCst|tj|S)z7Import module, returning the module after the last dot.) __import__sysmodules)namer r r _import_modulePsrc@seZdZddZddZdS) _LazyDescrcCs ||_dS)N)r)r rr r r __init__Xsz_LazyDescr.__init__c CsB|j}t||j|yt|j|jWntk r<YnX|S)N)_resolvesetattrrdelattr __class__AttributeError)r objtpresultr r r __get__[sz_LazyDescr.__get__N)rrrrr%r r r r rVsrcs.eZdZdfdd ZddZddZZS) MovedModuleNcs2tt|j|tr(|dkr |}||_n||_dS)N)superr&rPY3mod)r roldnew)r r r ris zMovedModule.__init__cCs t|jS)N)rr))r r r r rrszMovedModule._resolvecCs"|j}t||}t||||S)N)rgetattrr)r attr_modulevaluer r r __getattr__us  zMovedModule.__getattr__)N)rrrrrr0 __classcell__r r )r r r&gs r&cs(eZdZfddZddZgZZS) _LazyModulecstt|j||jj|_dS)N)r'r2rr r)r r)r r r r~sz_LazyModule.__init__cCs ddg}|dd|jD7}|S)NrrcSsg|] }|jqSr )r).0r-r r r sz'_LazyModule.__dir__..)_moved_attributes)r Zattrsr r r __dir__sz_LazyModule.__dir__)rrrrr6r5r1r r )r r r2|s r2cs&eZdZdfdd ZddZZS)MovedAttributeNcsdtt|j|trH|dkr |}||_|dkr@|dkr<|}n|}||_n||_|dkrZ|}||_dS)N)r'r7rr(r)r-)r rZold_modZnew_modZold_attrZnew_attr)r r r rszMovedAttribute.__init__cCst|j}t||jS)N)rr)r,r-)r moduler r r rs zMovedAttribute._resolve)NN)rrrrrr1r r )r r r7sr7c@sVeZdZdZddZddZddZdd d Zd d Zd dZ ddZ ddZ e Z dS)_SixMetaPathImporterz A meta path importer to import six.moves and its submodules. This class implements a PEP302 finder and loader. 
It should be compatible with Python 2.5 and all existing versions of Python3 cCs||_i|_dS)N)r known_modules)r Zsix_module_namer r r rsz_SixMetaPathImporter.__init__cGs&x |D]}||j|jd|<qWdS)N.)r:r)r r)Z fullnamesfullnamer r r _add_modules z _SixMetaPathImporter._add_modulecCs|j|jd|S)Nr;)r:r)r r<r r r _get_modulesz _SixMetaPathImporter._get_moduleNcCs||jkr|SdS)N)r:)r r<pathr r r find_modules z _SixMetaPathImporter.find_modulec Cs0y |j|Stk r*td|YnXdS)Nz!This loader does not know module )r:KeyError ImportError)r r<r r r Z __get_modules z!_SixMetaPathImporter.__get_modulec CsRy tj|Stk rYnX|j|}t|tr>|j}n||_|tj|<|S)N)rrrA _SixMetaPathImporter__get_module isinstancer&r __loader__)r r<r)r r r load_modules     z _SixMetaPathImporter.load_modulecCst|j|dS)z Return true, if the named module is a package. We need this method to get correct spec objects with Python 3.4 (see PEP451) __path__)hasattrrC)r r<r r r is_packagesz_SixMetaPathImporter.is_packagecCs|j|dS)z;Return None Required, if is_package is implementedN)rC)r r<r r r get_codes z_SixMetaPathImporter.get_code)N) rrrrrr=r>r@rCrFrIrJ get_sourcer r r r r9s  r9c@seZdZdZgZdS) _MovedItemszLazy loading of moved objectsN)rrrrrGr r r r rLsrLZ cStringIOioStringIOfilter itertoolsbuiltinsZifilter filterfalseZ ifilterfalseinputZ __builtin__Z raw_inputinternrmapimapgetcwdosZgetcwdugetcwdbrangeZxrangeZ reload_module importlibZimpreloadreduce functoolsZ shlex_quoteZpipesZshlexZquoteUserDict collectionsUserList UserStringzipZizip zip_longestZ izip_longestZ configparserZ ConfigParsercopyregZcopy_regZdbm_gnuZgdbmzdbm.gnuZ _dummy_threadZ dummy_threadZhttp_cookiejarZ cookielibzhttp.cookiejarZ http_cookiesZCookiez http.cookiesZ html_entitiesZhtmlentitydefsz html.entitiesZ html_parserZ HTMLParserz html.parserZ http_clientZhttplibz 
http.clientZemail_mime_multipartzemail.MIMEMultipartzemail.mime.multipartZemail_mime_nonmultipartzemail.MIMENonMultipartzemail.mime.nonmultipartZemail_mime_textzemail.MIMETextzemail.mime.textZemail_mime_basezemail.MIMEBasezemail.mime.baseZBaseHTTPServerz http.serverZ CGIHTTPServerZSimpleHTTPServerZcPicklepickleZqueueZQueuereprlibreprZ socketserverZ SocketServer_threadZthreadZtkinterZTkinterZtkinter_dialogZDialogztkinter.dialogZtkinter_filedialogZ FileDialogztkinter.filedialogZtkinter_scrolledtextZ ScrolledTextztkinter.scrolledtextZtkinter_simpledialogZ SimpleDialogztkinter.simpledialogZ tkinter_tixZTixz tkinter.tixZ tkinter_ttkZttkz tkinter.ttkZtkinter_constantsZ Tkconstantsztkinter.constantsZ tkinter_dndZTkdndz tkinter.dndZtkinter_colorchooserZtkColorChooserztkinter.colorchooserZtkinter_commondialogZtkCommonDialogztkinter.commondialogZtkinter_tkfiledialogZ tkFileDialogZ tkinter_fontZtkFontz tkinter.fontZtkinter_messageboxZ tkMessageBoxztkinter.messageboxZtkinter_tksimpledialogZtkSimpleDialogZ urllib_parsez.moves.urllib_parsez urllib.parseZ urllib_errorz.moves.urllib_errorz urllib.errorZurllibz .moves.urllibZurllib_robotparser robotparserzurllib.robotparserZ xmlrpc_clientZ xmlrpclibz xmlrpc.clientZ xmlrpc_serverZSimpleXMLRPCServerz xmlrpc.serverZwin32winreg_winregzmoves.z.movesmovesc@seZdZdZdS)Module_six_moves_urllib_parsez7Lazy loading of moved objects in six.moves.urllib_parseN)rrrrr r r r rn@srnZ ParseResultZurlparseZ SplitResultZparse_qsZ parse_qslZ urldefragZurljoinZurlsplitZ urlunparseZ urlunsplitZ quote_plusZunquoteZ unquote_plusZ urlencodeZ splitqueryZsplittagZ splituserZ uses_fragmentZ uses_netlocZ uses_paramsZ uses_queryZ uses_relativezmoves.urllib_parsezmoves.urllib.parsec@seZdZdZdS)Module_six_moves_urllib_errorz7Lazy loading of moved objects in six.moves.urllib_errorN)rrrrr r r r rohsroZURLErrorZurllib2Z HTTPErrorZContentTooShortErrorz.moves.urllib.errorzmoves.urllib_errorzmoves.urllib.errorc@seZdZdZdS)Module_six_moves_urllib_requestz9Lazy loading of 
moved objects in six.moves.urllib_requestN)rrrrr r r r rp|srpZurlopenzurllib.requestZinstall_openerZ build_openerZ pathname2urlZ url2pathnameZ getproxiesZRequestZOpenerDirectorZHTTPDefaultErrorHandlerZHTTPRedirectHandlerZHTTPCookieProcessorZ ProxyHandlerZ BaseHandlerZHTTPPasswordMgrZHTTPPasswordMgrWithDefaultRealmZAbstractBasicAuthHandlerZHTTPBasicAuthHandlerZProxyBasicAuthHandlerZAbstractDigestAuthHandlerZHTTPDigestAuthHandlerZProxyDigestAuthHandlerZ HTTPHandlerZ HTTPSHandlerZ FileHandlerZ FTPHandlerZCacheFTPHandlerZUnknownHandlerZHTTPErrorProcessorZ urlretrieveZ urlcleanupZ URLopenerZFancyURLopenerZ proxy_bypassz.moves.urllib.requestzmoves.urllib_requestzmoves.urllib.requestc@seZdZdZdS) Module_six_moves_urllib_responsez:Lazy loading of moved objects in six.moves.urllib_responseN)rrrrr r r r rqsrqZaddbasezurllib.responseZ addclosehookZaddinfoZ addinfourlz.moves.urllib.responsezmoves.urllib_responsezmoves.urllib.responsec@seZdZdZdS)#Module_six_moves_urllib_robotparserz=Lazy loading of moved objects in six.moves.urllib_robotparserN)rrrrr r r r rrsrrZRobotFileParserz.moves.urllib.robotparserzmoves.urllib_robotparserzmoves.urllib.robotparserc@sNeZdZdZgZejdZejdZejdZ ejdZ ejdZ ddZ d S) Module_six_moves_urllibzICreate a six.moves.urllib namespace that resembles the Python 3 namespacezmoves.urllib_parsezmoves.urllib_errorzmoves.urllib_requestzmoves.urllib_responsezmoves.urllib_robotparsercCsdddddgS)Nparseerrorrequestresponserjr )r r r r r6szModule_six_moves_urllib.__dir__N) rrrrrG _importerr>rtrurvrwrjr6r r r r rss     rsz moves.urllibcCstt|j|dS)zAdd an item to six.moves.N)rrLr)Zmover r r add_movesrycCsXytt|WnDtk rRy tj|=Wn"tk rLtd|fYnXYnXdS)zRemove item from six.moves.zno such move, %rN)rrLr!rm__dict__rA)rr r r remove_moves r{__func____self__ __closure____code__ __defaults__ __globals__im_funcZim_selfZ func_closureZ func_codeZ func_defaultsZ func_globalscCs|jS)N)next)itr r r advance_iterator srcCstddt|jDS)Ncss|]}d|jkVqdS)__call__N)rz)r3klassr r r 
szcallable..)anytype__mro__)r"r r r callablesrcCs|S)Nr )unboundr r r get_unbound_functionsrcCs|S)Nr )rclsr r r create_unbound_methodsrcCs|jS)N)r)rr r r r"scCstj|||jS)N)types MethodTyper )rr"r r r create_bound_method%srcCstj|d|S)N)rr)rrr r r r(sc@seZdZddZdS)IteratorcCst|j|S)N)r__next__)r r r r r-sz Iterator.nextN)rrrrr r r r r+srz3Get the function out of a possibly unbound functioncKst|jf|S)N)iterkeys)dkwr r r iterkeys>srcKst|jf|S)N)rvalues)rrr r r itervaluesAsrcKst|jf|S)N)ritems)rrr r r iteritemsDsrcKst|jf|S)N)rZlists)rrr r r iterlistsGsrrrrcKs |jf|S)N)r)rrr r r rPscKs |jf|S)N)r)rrr r r rSscKs |jf|S)N)r)rrr r r rVscKs |jf|S)N)r)rrr r r rYsviewkeys viewvalues viewitemsz1Return an iterator over the keys of a dictionary.z3Return an iterator over the values of a dictionary.z?Return an iterator over the (key, value) pairs of a dictionary.zBReturn an iterator over the (key, [values]) pairs of a dictionary.cCs |jdS)Nzlatin-1)encode)sr r r bksrcCs|S)Nr )rr r r unsrz>BassertCountEqualZassertRaisesRegexpZassertRegexpMatchesassertRaisesRegex assertRegexcCs|S)Nr )rr r r rscCst|jdddS)Nz\\z\\\\Zunicode_escape)unicodereplace)rr r r rscCs t|dS)Nr)ord)Zbsr r r byte2intsrcCs t||S)N)r)Zbufir r r indexbytessrZassertItemsEqualz Byte literalz Text literalcOst|t||S)N)r,_assertCountEqual)r argskwargsr r r rscOst|t||S)N)r,_assertRaisesRegex)r rrr r r rscOst|t||S)N)r, _assertRegex)r rrr r r rsexeccCs*|dkr|}|j|k r"|j||dS)N) __traceback__with_traceback)r#r/tbr r r reraises   rcCsB|dkr*tjd}|j}|dkr&|j}~n |dkr6|}tddS)zExecute code in a namespace.Nrzexec _code_ in _globs_, _locs_)r _getframe f_globalsf_localsr)Z_code_Z_globs_Z_locs_framer r r exec_s rz9def reraise(tp, value, tb=None): raise tp, value, tb zrdef raise_from(value, from_value): if from_value is None: raise value raise value from from_value zCdef raise_from(value, from_value): raise value from from_value cCs|dS)Nr )r/Z from_valuer r r raise_fromsrprintc s6|jdtjdkrdSfdd}d}|jdd}|dk r`t|trNd}nt|ts`td|jd d}|dk rt|trd}nt|tstd |rtd 
|sx|D]}t|trd}PqW|rtd }td }nd }d }|dkr|}|dkr|}x,t|D] \} }| r||||qW||dS)z4The new-style print function for Python 2.4 and 2.5.fileNcsdt|tst|}ttrVt|trVjdk rVtdd}|dkrHd}|jj|}j|dS)Nerrorsstrict) rD basestringstrrrencodingr,rwrite)datar)fpr r rs     zprint_..writeFsepTzsep must be None or a stringendzend must be None or a stringz$invalid keyword arguments to print()  )poprstdoutrDrr TypeError enumerate) rrrZ want_unicoderrargnewlineZspacerr )rr print_sL           rcOs<|jdtj}|jdd}t|||r8|dk r8|jdS)NrflushF)getrrr_printr)rrrrr r r r s    zReraise an exception.csfdd}|S)Ncstj|}|_|S)N)r^wraps __wrapped__)f)assignedupdatedwrappedr r wrapperszwraps..wrapperr )rrrrr )rrrr rsrcs&Gfddd}tj|dfiS)z%Create a base class with a metaclass.cseZdZfddZdS)z!with_metaclass..metaclasscs ||S)Nr )rrZ this_basesr)basesmetar r __new__'sz)with_metaclass..metaclass.__new__N)rrrrr )rrr r metaclass%srZtemporary_class)rr)rrrr )rrr with_metaclass srcsfdd}|S)z6Class decorator for creating a class with a metaclass.csl|jj}|jd}|dk rDt|tr,|g}x|D]}|j|q2W|jdd|jdd|j|j|S)N __slots__rz __weakref__)rzcopyrrDrrr __bases__)rZ orig_varsslotsZ slots_var)rr r r.s      zadd_metaclass..wrapperr )rrr )rr add_metaclass,s rcCs2tr.d|jkrtd|j|j|_dd|_|S)a A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. 
__str__zY@python_2_unicode_compatible cannot be applied to %s because it doesn't define __str__().cSs|jjdS)Nzutf-8) __unicode__r)r r r r Jsz-python_2_unicode_compatible..)PY2rz ValueErrorrrr)rr r r python_2_unicode_compatible<s   r__spec__)rrlilill)N)NN)rr)rr)rr)rr)rZ __future__rr^rPoperatorrr __author__ __version__ version_inforr(ZPY34rZ string_typesintZ integer_typesrZ class_typesZ text_typebytesZ binary_typemaxsizeZMAXSIZErZlongZ ClassTyperplatform startswithobjectr len OverflowErrorrrrr& ModuleTyper2r7r9rrxrLr5r-rrrDr=rmrnZ_urllib_parse_moved_attributesroZ_urllib_error_moved_attributesrpZ _urllib_request_moved_attributesrqZ!_urllib_response_moved_attributesrrZ$_urllib_robotparser_moved_attributesrsryr{Z _meth_funcZ _meth_selfZ _func_closureZ _func_codeZ_func_defaultsZ _func_globalsrr NameErrorrrrrrr attrgetterZget_method_functionZget_method_selfZget_function_closureZget_function_codeZget_function_defaultsZget_function_globalsrrrr methodcallerrrrrrchrZunichrstructStructpackZint2byte itemgetterrgetitemrrZ iterbytesrMrNBytesIOrrrpartialrVrrrrr,rQrrrrrWRAPPER_ASSIGNMENTSWRAPPER_UPDATESrrrrrG __package__globalsrrsubmodule_search_locations meta_pathrrZimporterappendr r r r s     >                                                                                                                                                          5     PKtge[0 KK*__pycache__/pyparsing.cpython-36.opt-1.pycnu[3 vh@s dZdZdZdZddlZddlmZddlZddl Z ddl Z ddl Z ddl Z ddl Z ddlZddlZddlZddlmZyddlmZWn ek rddlmZYnXydd l mZWn>ek rydd lmZWnek rdZYnXYnXd d d d ddddddddddddddddddd d!d"d#d$d%d&d'd(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7d8d9d:d;dd?d@dAdBdCdDdEdFdGdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcdddedfdgdhdidjdkdldmdndodpdqdrgiZee jddsZeddskZer"e jZe Z!e"Z#e Z$e%e&e'e(e)ee*e+e,e-e.g Z/nbe j0Ze1Z2dtduZ$gZ/ddl3Z3xBdvj4D]6Z5ye/j6e7e3e5Wne8k r|wJYnXqJWe9dwdxe2dyDZ:dzd{Z;Gd|d}d}e<Z=ej>ej?Z@d~ZAeAdZBe@eAZCe"dZDdjEddxejFDZGGdd!d!eHZIGdd#d#eIZJGdd%d%eIZKGdd'd'eKZLGdd*d*eHZMGddde<ZNGdd&d&e<ZOe 
jPjQeOdd=ZRddNZSddKZTddZUddZVddZWddUZXd/ddZYGdd(d(e<ZZGdd0d0eZZ[Gddde[Z\Gddde[Z]Gddde[Z^e^Z_e^eZ_`Gddde[ZaGdd d e^ZbGdd d eaZcGddpdpe[ZdGdd3d3e[ZeGdd+d+e[ZfGdd)d)e[ZgGdd d e[ZhGdd2d2e[ZiGddde[ZjGdddejZkGdddejZlGdddejZmGdd.d.ejZnGdd-d-ejZoGdd5d5ejZpGdd4d4ejZqGdd$d$eZZrGdd d erZsGdd d erZtGddderZuGddderZvGdd"d"eZZwGdddewZxGdddewZyGdddewZzGdddezZ{Gdd6d6ezZ|Gddde<Z}e}Z~GdddewZGdd,d,ewZGdddewZGdddeZGdd1d1ewZGdddeZGdddeZGdddeZGdd/d/eZGddde<ZddfZd0ddDZd1dd@Zdd΄ZddSZddRZdd҄Zd2ddWZddEZd3ddkZddlZddnZe\jdGZeljdMZemjdLZenjdeZeojddZeeeDdddڍjdd܄Zefd݃jdd܄Zefd߃jdd܄ZeeBeBeeeGddydBefde jBZeeedeZe^dedjdee{eeBjddZddcZddQZdd`Zdd^ZddqZedd܄Zedd܄ZddZddOZddPZddiZe<e_d4ddoZe=Ze<e_e<e_ededfddmZeZeefddjdZeefddjdZeefddefddBjdZee_dejjdZdddejfddTZd5ddjZedZedZeeee@eCdjd\ZZeed j4d Zefd d jEejÃd jdZĐdd_ZeefddjdZefdjdZefdjȃjdZefdjdZeefddeBjdZeZefdjdZee{eeeGdɐdeeede^dɃemj΃jdZeeejeBddjd>ZGd drdrZeҐd!k rebd"Zebd#Zeee@eCd$ZeeՐd%dӐd&jeZeeeփjd'Zאd(eBZeeՐd%dӐd&jeZeeeكjd)ZeӐd*eؐd'eeڐd)Zejܐd+ejjܐd,ejjܐd,ejjܐd-ddlZejjeejejjܐd.dS(6aS pyparsing module - Classes and methods to define and execute parsing grammars The pyparsing module is an alternative approach to creating and executing simple grammars, vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you don't need to learn a new syntax for defining grammars or matching expressions - the parsing module provides a library of classes that you use to construct the grammar directly in Python. Here is a program to parse "Hello, World!" (or any greeting of the form C{", !"}), built up using L{Word}, L{Literal}, and L{And} elements (L{'+'} operator gives L{And} expressions, strings are auto-converted to L{Literal} expressions):: from pyparsing import Word, alphas # define grammar of a greeting greet = Word(alphas) + "," + Word(alphas) + "!" hello = "Hello, World!" print (hello, "->", greet.parseString(hello)) The program outputs the following:: Hello, World! 
-> ['Hello', ',', 'World', '!'] The Python representation of the grammar is quite readable, owing to the self-explanatory class names, and the use of '+', '|' and '^' operators. The L{ParseResults} object returned from L{ParserElement.parseString} can be accessed as a nested list, a dictionary, or an object with named attributes. The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) - quoted strings - embedded comments z2.1.10z07 Oct 2016 01:31 UTCz*Paul McGuire N)ref)datetime)RLock) OrderedDictAndCaselessKeywordCaselessLiteral CharsNotInCombineDictEachEmpty FollowedByForward GoToColumnGroupKeywordLineEnd LineStartLiteral MatchFirstNoMatchNotAny OneOrMoreOnlyOnceOptionalOrParseBaseExceptionParseElementEnhanceParseExceptionParseExpressionParseFatalException ParseResultsParseSyntaxException ParserElement QuotedStringRecursiveGrammarExceptionRegexSkipTo StringEnd StringStartSuppressTokenTokenConverterWhiteWordWordEnd WordStart ZeroOrMore alphanumsalphas alphas8bit anyCloseTag anyOpenTag cStyleCommentcolcommaSeparatedListcommonHTMLEntity countedArraycppStyleCommentdblQuotedStringdblSlashComment delimitedListdictOfdowncaseTokensemptyhexnums htmlCommentjavaStyleCommentlinelineEnd lineStartlineno makeHTMLTags makeXMLTagsmatchOnlyAtColmatchPreviousExprmatchPreviousLiteral nestedExprnullDebugActionnumsoneOfopAssocoperatorPrecedence printablespunc8bitpythonStyleComment quotedString removeQuotesreplaceHTMLEntity replaceWith restOfLinesglQuotedStringsrange stringEnd stringStarttraceParseAction unicodeString upcaseTokens withAttribute indentedBlockoriginalTextForungroup infixNotation locatedExpr withClass CloseMatchtokenMappyparsing_commonc Cs`t|tr|Syt|Stk rZt|jtjd}td}|jdd|j |SXdS)aDrop-in replacement for str(obj) that tries to be Unicode friendly. It first tries str(obj). 
If that fails with a UnicodeEncodeError, then it tries unicode(obj). It then < returns the unicode object | encodes it with the default encoding | ... >. xmlcharrefreplacez&#\d+;cSs$dtt|dddddS)Nz\ur)hexint)trw/usr/lib/python3.6/pyparsing.pysz_ustr..N) isinstanceZunicodestrUnicodeEncodeErrorencodesysgetdefaultencodingr'setParseActiontransformString)objretZ xmlcharrefrwrwrx_ustrs rz6sum len sorted reversed list tuple set any all min maxccs|] }|VqdS)Nrw).0yrwrwrx srrrcCs>d}dddjD}x"t||D]\}}|j||}q"W|S)z/Escape &, <, >, ", ', etc. in a string of data.z&><"'css|]}d|dVqdS)&;Nrw)rsrwrwrxrsz_xml_escape..zamp gt lt quot apos)splitzipreplace)dataZ from_symbolsZ to_symbolsZfrom_Zto_rwrwrx _xml_escapes rc@s eZdZdS) _ConstantsN)__name__ __module__ __qualname__rwrwrwrxrsr 0123456789Z ABCDEFabcdef\ccs|]}|tjkr|VqdS)N)stringZ whitespace)rcrwrwrxrsc@sPeZdZdZdddZeddZdd Zd d Zd d Z dddZ ddZ dS)rz7base exception class for all parsing runtime exceptionsrNcCs>||_|dkr||_d|_n ||_||_||_|||f|_dS)Nr)locmsgpstr parserElementargs)selfrrrelemrwrwrx__init__szParseBaseException.__init__cCs||j|j|j|jS)z internal factory method to simplify creating one type of ParseException from another - avoids having __init__ signature conflicts among subclasses )rrrr)clsperwrwrx_from_exceptionsz"ParseBaseException._from_exceptioncCsN|dkrt|j|jS|dkr,t|j|jS|dkrBt|j|jSt|dS)zsupported attributes by name are: - lineno - returns the line number of the exception text - col - returns the column number of the exception text - line - returns the line containing the exception text rJr9columnrGN)r9r)rJrrr9rGAttributeError)rZanamerwrwrx __getattr__szParseBaseException.__getattr__cCsd|j|j|j|jfS)Nz"%s (at char %d), (line:%d, col:%d))rrrJr)rrwrwrx__str__szParseBaseException.__str__cCst|S)N)r)rrwrwrx__repr__szParseBaseException.__repr__>!} ('-' operator) indicates that parsing is to stop immediately because an unbacktrackable syntax error has been foundN)rrrrrwrwrwrxr#sc@s eZdZdZddZddZdS)r&zZexception thrown by 
L{ParserElement.validate} if the grammar could be improperly recursivecCs ||_dS)N)parseElementTrace)rparseElementListrwrwrxrsz"RecursiveGrammarException.__init__cCs d|jS)NzRecursiveGrammarException: %s)r)rrwrwrxr sz!RecursiveGrammarException.__str__N)rrrrrrrwrwrwrxr&sc@s,eZdZddZddZddZddZd S) _ParseResultsWithOffsetcCs||f|_dS)N)tup)rZp1Zp2rwrwrxr$sz _ParseResultsWithOffset.__init__cCs |j|S)N)r)rirwrwrx __getitem__&sz#_ParseResultsWithOffset.__getitem__cCst|jdS)Nr)reprr)rrwrwrxr(sz _ParseResultsWithOffset.__repr__cCs|jd|f|_dS)Nr)r)rrrwrwrx setOffset*sz!_ParseResultsWithOffset.setOffsetN)rrrrrrrrwrwrwrxr#src@seZdZdZd[ddZddddefddZdd Zefd d Zd d Z ddZ ddZ ddZ e Z ddZddZddZddZddZereZeZeZn$eZeZeZddZd d!Zd"d#Zd$d%Zd&d'Zd\d(d)Zd*d+Zd,d-Zd.d/Zd0d1Z d2d3Z!d4d5Z"d6d7Z#d8d9Z$d:d;Z%d} - see L{ParserElement.setResultsName}) Example:: integer = Word(nums) date_str = (integer.setResultsName("year") + '/' + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") # parseString returns a ParseResults object result = date_str.parseString("1999/12/31") def test(s, fn=repr): print("%s -> %s" % (s, fn(eval(s)))) test("list(result)") test("result[0]") test("result['month']") test("result.day") test("'month' in result") test("'minutes' in result") test("result.dump()", str) prints:: list(result) -> ['1999', '/', '12', '/', '31'] result[0] -> '1999' result['month'] -> '12' result.day -> '31' 'month' in result -> True 'minutes' in result -> False result.dump() -> ['1999', '/', '12', '/', '31'] - day: 31 - month: 12 - year: 1999 NTcCs"t||r|Stj|}d|_|S)NT)rzobject__new___ParseResults__doinit)rtoklistnameasListmodalZretobjrwrwrxrTs   zParseResults.__new__c Cs`|jrvd|_d|_d|_i|_||_||_|dkr6g}||trP|dd|_n||trft||_n|g|_t |_ |dk o|r\|sd|j|<||t rt |}||_||t dttfo|ddgfks\||tr|g}|r&||trt|jd||<ntt|dd||<|||_n6y|d||<Wn$tttfk 
rZ|||<YnXdS)NFrr)r_ParseResults__name_ParseResults__parent_ParseResults__accumNames_ParseResults__asList_ParseResults__modallist_ParseResults__toklist_generatorTypedict_ParseResults__tokdictrurr basestringr"rcopyKeyError TypeError IndexError)rrrrrrzrwrwrxr]sB     $   zParseResults.__init__cCsPt|ttfr|j|S||jkr4|j|ddStdd|j|DSdS)NrrrcSsg|] }|dqS)rrw)rvrwrwrx sz,ParseResults.__getitem__..rs)rzruslicerrrr")rrrwrwrxrs   zParseResults.__getitem__cCs||tr0|jj|t|g|j|<|d}nD||ttfrN||j|<|}n&|jj|tt|dg|j|<|}||trt||_ dS)Nr) rrgetrrurrr"wkrefr)rkrrzsubrwrwrx __setitem__s   " zParseResults.__setitem__c Cst|ttfrt|j}|j|=t|trH|dkr:||7}t||d}tt|j|}|jx^|j j D]F\}}x<|D]4}x.t |D]"\}\}} t || | |k||<qWq|WqnWn|j |=dS)Nrrr) rzrurlenrrrangeindicesreverseritems enumerater) rrZmylenZremovedr occurrencesjrvaluepositionrwrwrx __delitem__s   $zParseResults.__delitem__cCs ||jkS)N)r)rrrwrwrx __contains__szParseResults.__contains__cCs t|jS)N)rr)rrwrwrx__len__szParseResults.__len__cCs |j S)N)r)rrwrwrx__bool__szParseResults.__bool__cCs t|jS)N)iterr)rrwrwrx__iter__szParseResults.__iter__cCst|jdddS)Nrrrs)rr)rrwrwrx __reversed__szParseResults.__reversed__cCs$t|jdr|jjSt|jSdS)Niterkeys)hasattrrrr)rrwrwrx _iterkeyss  zParseResults._iterkeyscsfddjDS)Nc3s|]}|VqdS)Nrw)rr)rrwrxrsz+ParseResults._itervalues..)r)rrw)rrx _itervaluesszParseResults._itervaluescsfddjDS)Nc3s|]}||fVqdS)Nrw)rr)rrwrxrsz*ParseResults._iteritems..)r)rrw)rrx _iteritemsszParseResults._iteritemscCs t|jS)zVReturns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).)rr)rrwrwrxkeysszParseResults.keyscCs t|jS)zXReturns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).)r itervalues)rrwrwrxvaluesszParseResults.valuescCs t|jS)zfReturns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).)r iteritems)rrwrwrxrszParseResults.itemscCs t|jS)zSince keys() returns an iterator, this method is helpful in bypassing code that looks for the existence of any defined 
results names.)boolr)rrwrwrxhaskeysszParseResults.haskeyscOs|s dg}x6|jD]*\}}|dkr2|d|f}qtd|qWt|dtsht|dksh|d|kr|d}||}||=|S|d}|SdS)a Removes and returns item at specified index (default=C{last}). Supports both C{list} and C{dict} semantics for C{pop()}. If passed no argument or an integer argument, it will use C{list} semantics and pop tokens from the list of parsed tokens. If passed a non-integer argument (most likely a string), it will use C{dict} semantics and pop the corresponding value from any defined results names. A second default return value argument is supported, just as in C{dict.pop()}. Example:: def remove_first(tokens): tokens.pop(0) print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] label = Word(alphas) patt = label("LABEL") + OneOrMore(Word(nums)) print(patt.parseString("AAB 123 321").dump()) # Use pop() in a parse action to remove named result (note that corresponding value is not # removed from list form of results) def remove_LABEL(tokens): tokens.pop("LABEL") return tokens patt.addParseAction(remove_LABEL) print(patt.parseString("AAB 123 321").dump()) prints:: ['AAB', '123', '321'] - LABEL: AAB ['AAB', '123', '321'] rrdefaultrz-pop() got an unexpected keyword argument '%s'Nrs)rrrzrur)rrkwargsrrindexrZ defaultvaluerwrwrxpops"  zParseResults.popcCs||kr||S|SdS)ai Returns named result matching the given key, or if there is no such name, then returns the given C{defaultValue} or C{None} if no C{defaultValue} is specified. Similar to C{dict.get()}. 
Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString("1999/12/31") print(result.get("year")) # -> '1999' print(result.get("hour", "not specified")) # -> 'not specified' print(result.get("hour")) # -> None Nrw)rkey defaultValuerwrwrxrszParseResults.getcCsZ|jj||xF|jjD]8\}}x.t|D]"\}\}}t||||k||<q,WqWdS)a Inserts new element at location index in the list of parsed tokens. Similar to C{list.insert()}. Example:: print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to insert the parse location in the front of the parsed results def insert_locn(locn, tokens): tokens.insert(0, locn) print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] N)rinsertrrrr)rrZinsStrrrrrrrwrwrxr2szParseResults.insertcCs|jj|dS)a Add single element to end of ParseResults list of elements. Example:: print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to compute the sum of the parsed integers, and add it to the end def append_sum(tokens): tokens.append(sum(map(int, tokens))) print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] N)rappend)ritemrwrwrxrFs zParseResults.appendcCs$t|tr||7}n |jj|dS)a Add sequence of elements to end of ParseResults list of elements. Example:: patt = OneOrMore(Word(alphas)) # use a parse action to append the reverse of the matched strings, to make a palindrome def make_palindrome(tokens): tokens.extend(reversed([t[::-1] for t in tokens])) return ''.join(tokens) print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' N)rzr"rextend)rZitemseqrwrwrxrTs  zParseResults.extendcCs|jdd=|jjdS)z7 Clear all elements and results names. 
N)rrclear)rrwrwrxrfs zParseResults.clearc Csfy||Stk rdSX||jkr^||jkrD|j|ddStdd|j|DSndSdS)NrrrrcSsg|] }|dqS)rrw)rrrwrwrxrwsz,ParseResults.__getattr__..rs)rrrr")rrrwrwrxrms  zParseResults.__getattr__cCs|j}||7}|S)N)r)rotherrrwrwrx__add__{szParseResults.__add__cs|jrnt|jfdd|jj}fdd|D}x4|D],\}}|||<t|dtr>t||d_q>W|j|j7_|jj |j|S)Ncs|dkr S|S)Nrrw)a)offsetrwrxrysz'ParseResults.__iadd__..c s4g|],\}}|D]}|t|d|dfqqS)rrr)r)rrvlistr) addoffsetrwrxrsz)ParseResults.__iadd__..r) rrrrrzr"rrrupdate)rrZ otheritemsZotherdictitemsrrrw)rrrx__iadd__s    zParseResults.__iadd__cCs&t|tr|dkr|jS||SdS)Nr)rzrur)rrrwrwrx__radd__szParseResults.__radd__cCsdt|jt|jfS)Nz(%s, %s))rrr)rrwrwrxrszParseResults.__repr__cCsddjdd|jDdS)N[z, css(|] }t|trt|nt|VqdS)N)rzr"rr)rrrwrwrxrsz'ParseResults.__str__..])rr)rrwrwrxrszParseResults.__str__rcCsPg}xF|jD]<}|r"|r"|j|t|tr:||j7}q |jt|q W|S)N)rrrzr" _asStringListr)rsepoutrrwrwrxr s   zParseResults._asStringListcCsdd|jDS)a Returns the parse results as a nested list of matching tokens, all converted to strings. Example:: patt = OneOrMore(Word(alphas)) result = patt.parseString("sldkj lsdkj sldkj") # even though the result prints in string-like form, it is actually a pyparsing ParseResults print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] # Use asList() to create an actual list result_list = result.asList() print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] cSs"g|]}t|tr|jn|qSrw)rzr"r)rresrwrwrxrsz'ParseResults.asList..)r)rrwrwrxrszParseResults.asListcs6tr |j}n|j}fddtfdd|DS)a Returns the named parse results as a nested dictionary. 
Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString('12/31/1999') print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) result_dict = result.asDict() print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} # even though a ParseResults supports dict-like access, sometime you just need to have a dict import json print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} cs6t|tr.|jr|jSfdd|DSn|SdS)Ncsg|] }|qSrwrw)rr)toItemrwrxrsz7ParseResults.asDict..toItem..)rzr"rasDict)r)rrwrxrs  z#ParseResults.asDict..toItemc3s|]\}}||fVqdS)Nrw)rrr)rrwrxrsz&ParseResults.asDict..)PY_3rrr)rZitem_fnrw)rrxrs  zParseResults.asDictcCs8t|j}|jj|_|j|_|jj|j|j|_|S)zA Returns a new copy of a C{ParseResults} object. )r"rrrrrrr)rrrwrwrxrs   zParseResults.copyFc CsPd}g}tdd|jjD}|d}|s8d}d}d}d} |dk rJ|} n |jrV|j} | sf|rbdSd} |||d| d g7}xt|jD]\} } t| tr| |kr|| j|| |o|dk||g7}n|| jd|o|dk||g7}qd} | |kr|| } | s |rqnd} t t | } |||d| d | d | d g 7}qW|||d | d g7}dj |S) z (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.  css(|] \}}|D]}|d|fVqqdS)rrNrw)rrrrrwrwrxrsz%ParseResults.asXML..z rNZITEM<>z.z %s%s- %s: z rrcss|]}t|tVqdS)N)rzr")rvvrwrwrxrssz %s%s[%d]: %s%s%sr) rrrrsortedrrzr"dumpranyrr) rrdepthfullr NLrrrrrrwrwrxrPs,   4.zParseResults.dumpcOstj|jf||dS)a Pretty-printer for parsed results as a list, using the C{pprint} module. Accepts additional positional or keyword args as defined for the C{pprint.pprint} method. 
(U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) Example:: ident = Word(alphas, alphanums) num = Word(nums) func = Forward() term = ident | num | Group('(' + func + ')') func <<= ident + Group(Optional(delimitedList(term))) result = func.parseString("fna a,b,(fnb c,d,200),100") result.pprint(width=40) prints:: ['fna', ['a', 'b', ['(', 'fnb', ['c', 'd', '200'], ')'], '100']] N)pprintr)rrrrwrwrxr"}szParseResults.pprintcCs.|j|jj|jdk r|jp d|j|jffS)N)rrrrrr)rrwrwrx __getstate__s zParseResults.__getstate__cCsN|d|_|d\|_}}|_i|_|jj||dk rDt||_nd|_dS)Nrrr)rrrrrrr)rstaterZ inAccumNamesrwrwrx __setstate__s   zParseResults.__setstate__cCs|j|j|j|jfS)N)rrrr)rrwrwrx__getnewargs__szParseResults.__getnewargs__cCstt|t|jS)N)rrrr)rrwrwrxrszParseResults.__dir__)NNTT)N)r)NFrT)rrT)4rrrrrrzrrrrrrr __nonzero__rrrrrrrrrrrrrrrrrrrrrrrrrr rrrrrrrr"r#r%r&rrwrwrwrxr"-sh& ' 4  # =% - cCsF|}d|kot|knr4||ddkr4dS||jdd|S)aReturns current column within a string, counting newlines as line separators. The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}} for more information on parsing strings containing C{}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. rrrr)rrfind)rstrgrrwrwrxr9s cCs|jdd|dS)aReturns current line number within a string, counting newlines as line separators. The first line is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}} for more information on parsing strings containing C{}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. 
rrrr)count)rr)rwrwrxrJs cCsF|jdd|}|jd|}|dkr2||d|S||ddSdS)zfReturns the line of text containing loc within a string, counting newlines as line separators. rrrrN)r(find)rr)ZlastCRZnextCRrwrwrxrGs  cCs8tdt|dt|dt||t||fdS)NzMatch z at loc z(%d,%d))printrrJr9)instringrexprrwrwrx_defaultStartDebugActionsr/cCs$tdt|dt|jdS)NzMatched z -> )r,rr{r)r-startlocZendlocr.toksrwrwrx_defaultSuccessDebugActionsr2cCstdt|dS)NzException raised:)r,r)r-rr.excrwrwrx_defaultExceptionDebugActionsr4cGsdS)zG'Do-nothing' debug action, to suppress debugging output during parsing.Nrw)rrwrwrxrQsrqc stkrfddSdgdgtdddkrFddd }dd d n tj}tjd }|dd d}|d|d|ffdd}d}ytdtdj}Wntk rt}YnX||_|S)Ncs|S)Nrw)rlrv)funcrwrxrysz_trim_arity..rFrqrocSs8tdkr dnd }tj| |dd|}|j|jfgS) Nror7rrqrr)limit)ror7r)system_version traceback extract_stackfilenamerJ)r8r frame_summaryrwrwrxr=sz"_trim_arity..extract_stackcSs$tj||d}|d}|j|jfgS)N)r8rrrs)r< extract_tbr>rJ)tbr8Zframesr?rwrwrxr@sz_trim_arity..extract_tb)r8rrcsxy |dd}dd<|Stk rdr>n4z.tjd}|dddddksjWd~Xdkrdd7<wYqXqWdS)NrTrrrq)r8rsrs)rr~exc_info)rrrA)r@ foundArityr6r8maxargspa_call_line_synthrwrxwrappers"  z_trim_arity..wrapperzr __class__)ror7)r)rrs) singleArgBuiltinsr;r<r=r@getattrr Exceptionr{)r6rEr=Z LINE_DIFFZ this_linerG func_namerw)r@rDr6r8rErFrx _trim_aritys*   rMcseZdZdZdZdZeddZeddZddd Z d d Z d d Z dddZ dddZ ddZddZddZddZddZddZddd Zd!d"Zdd#d$Zd%d&Zd'd(ZGd)d*d*eZed+k rGd,d-d-eZnGd.d-d-eZiZeZd/d/gZ dd0d1Z!eZ"ed2d3Z#dZ$edd5d6Z%dd7d8Z&e'dfd9d:Z(d;d<Z)e'fd=d>Z*e'dfd?d@Z+dAdBZ,dCdDZ-dEdFZ.dGdHZ/dIdJZ0dKdLZ1dMdNZ2dOdPZ3dQdRZ4dSdTZ5dUdVZ6dWdXZ7dYdZZ8dd[d\Z9d]d^Z:d_d`Z;dadbZdgdhZ?ddidjZ@dkdlZAdmdnZBdodpZCdqdrZDgfdsdtZEddudvZFfdwdxZGdydzZHd{d|ZId}d~ZJddZKdddZLdddZMZNS)r$z)Abstract base level parser element class.z FcCs |t_dS)a Overrides the default whitespace chars Example:: # default whitespace chars are space, and newline OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] # change to just treat newline as significant 
ParserElement.setDefaultWhitespaceChars(" \t") OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] N)r$DEFAULT_WHITE_CHARS)charsrwrwrxsetDefaultWhitespaceChars=s z'ParserElement.setDefaultWhitespaceCharscCs |t_dS)a Set class to be used for inclusion of string literals into a parser. Example:: # default literal class used is Literal integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] # change to Suppress ParserElement.inlineLiteralsUsing(Suppress) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] N)r$_literalStringClass)rrwrwrxinlineLiteralsUsingLsz!ParserElement.inlineLiteralsUsingcCst|_d|_d|_d|_||_d|_tj|_ d|_ d|_ d|_ t|_ d|_d|_d|_d|_d|_d|_d|_d|_d|_dS)NTFr)NNN)r parseAction failActionstrRepr resultsName saveAsListskipWhitespacer$rN whiteCharscopyDefaultWhiteCharsmayReturnEmptykeepTabs ignoreExprsdebug streamlined mayIndexErrorerrmsg modalResults debugActionsre callPreparse callDuringTry)rsavelistrwrwrxras(zParserElement.__init__cCs<tj|}|jdd|_|jdd|_|jr8tj|_|S)a$ Make a copy of this C{ParserElement}. Useful for defining different parse actions for the same parsing pattern, using copies of the original parse element. 
Example:: integer = Word(nums).setParseAction(lambda toks: int(toks[0])) integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) prints:: [5120, 100, 655360, 268435456] Equivalent form of C{expr.copy()} is just C{expr()}:: integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") N)rrSr]rZr$rNrY)rZcpyrwrwrxrxs  zParserElement.copycCs*||_d|j|_t|dr&|j|j_|S)af Define name for this expression, makes debugging and exception messages clearer. Example:: Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) z Expected exception)rrarrhr)rrrwrwrxsetNames    zParserElement.setNamecCs4|j}|jdr"|dd}d}||_| |_|S)aP Define name for referencing matching tokens as a nested attribute of the returned parse results. NOTE: this returns a *copy* of the original C{ParserElement} object; this is so that the client can define a basic element, such as an integer, and reference it in multiple places with different names. You can also set results names using the abbreviated syntax, C{expr("name")} in place of C{expr.setResultsName("name")} - see L{I{__call__}<__call__>}. Example:: date_str = (integer.setResultsName("year") + '/' + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: date_str = integer("year") + '/' + integer("month") + '/' + integer("day") *NrrTrs)rendswithrVrb)rrlistAllMatchesZnewselfrwrwrxsetResultsNames  zParserElement.setResultsNameTcs@|r&|jdfdd }|_||_nt|jdr<|jj|_|S)zMethod to invoke the Python pdb debugger when this element is about to be parsed. Set C{breakFlag} to True to enable, False to disable. 
Tcsddl}|j||||S)Nr)pdbZ set_trace)r-r doActions callPreParsern) _parseMethodrwrxbreakersz'ParserElement.setBreak..breaker_originalParseMethod)TT)_parsersr)rZ breakFlagrrrw)rqrxsetBreaks  zParserElement.setBreakcOs&tttt||_|jdd|_|S)a  Define action to perform when successfully matching parse element definition. Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: - s = the original string being parsed (see note below) - loc = the location of the matching substring - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object If the functions in fns modify the tokens, they can return them as the return value from fn, and the modified list of tokens will replace the original. Otherwise, fn does not need to return any value. Optional keyword arguments: - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{parseString}} for more information on parsing strings containing C{}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. Example:: integer = Word(nums) date_str = integer + '/' + integer + '/' + integer date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] # use parse action to convert to ints at parse time integer = Word(nums).setParseAction(lambda toks: int(toks[0])) date_str = integer + '/' + integer + '/' + integer # note that integer fields are now ints, not strings date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] rfF)rmaprMrSrrf)rfnsrrwrwrxrs"zParserElement.setParseActioncOs4|jtttt|7_|jp,|jdd|_|S)z Add parse action to expression's list of parse actions. See L{I{setParseAction}}. See examples in L{I{copy}}. 
rfF)rSrrvrMrfr)rrwrrwrwrxaddParseActionszParserElement.addParseActioncsb|jdd|jddrtntx(|D] fdd}|jj|q&W|jpZ|jdd|_|S)aAdd a boolean predicate function to expression's list of parse actions. See L{I{setParseAction}} for function call signatures. Unlike C{setParseAction}, functions passed to C{addCondition} need to return boolean success/fail of the condition. Optional keyword arguments: - message = define a custom message to be used in the raised exception - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException Example:: integer = Word(nums).setParseAction(lambda toks: int(toks[0])) year_int = integer.copy() year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") date_str = year_int + '/' + integer + '/' + integer result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) messagezfailed user-defined conditionfatalFcs$tt|||s ||dS)N)rrM)rr5rv)exc_typefnrrwrxpasz&ParserElement.addCondition..parf)rr!rrSrrf)rrwrr}rw)r{r|rrx addConditions  zParserElement.addConditioncCs ||_|S)a Define action to perform if parsing fails at this expression. Fail acton fn is a callable function that takes the arguments C{fn(s,loc,expr,err)} where: - s = string being parsed - loc = location where expression match was attempted and failed - expr = the parse expression that failed - err = the exception thrown The function returns no value. 
It may throw C{L{ParseFatalException}} if it is desired to stop parsing immediately.)rT)rr|rwrwrx setFailActions zParserElement.setFailActionc CsZd}xP|rTd}xB|jD]8}yx|j||\}}d}qWWqtk rLYqXqWqW|S)NTF)r]rtr)rr-rZ exprsFoundeZdummyrwrwrx_skipIgnorables#s  zParserElement._skipIgnorablescCsL|jr|j||}|jrH|j}t|}x ||krF|||krF|d7}q(W|S)Nrr)r]rrXrYr)rr-rZwtinstrlenrwrwrxpreParse0s  zParserElement.preParsecCs|gfS)Nrw)rr-rrorwrwrx parseImpl<szParserElement.parseImplcCs|S)Nrw)rr-r tokenlistrwrwrx postParse?szParserElement.postParsec "Cs|j}|s|jr|jdr,|jd||||rD|jrD|j||}n|}|}yDy|j|||\}}Wn(tk rt|t||j |YnXWnXt k r} z<|jdr|jd|||| |jr|j|||| WYdd} ~ XnXn|o|jr|j||}n|}|}|j s$|t|krhy|j|||\}}Wn*tk rdt|t||j |YnXn|j|||\}}|j |||}t ||j|j|jd} |jr|s|jr|rVyRxL|jD]B} | ||| }|dk rt ||j|jot|t tf|jd} qWWnFt k rR} z(|jdr@|jd|||| WYdd} ~ XnXnNxL|jD]B} | ||| }|dk r^t ||j|jot|t tf|jd} q^W|r|jdr|jd||||| || fS)Nrrq)rrrr)r^rTrcrerrrrrrarr`rr"rVrWrbrSrfrzr) rr-rrorpZ debuggingprelocZ tokensStarttokenserrZ retTokensr|rwrwrx _parseNoCacheCsp             zParserElement._parseNoCachec Cs>y|j||dddStk r8t|||j|YnXdS)NF)ror)rtr!rra)rr-rrwrwrxtryParseszParserElement.tryParsec Cs2y|j||Wnttfk r(dSXdSdS)NFT)rrr)rr-rrwrwrx canParseNexts zParserElement.canParseNextc@seZdZddZdS)zParserElement._UnboundedCachecsdit|_fdd}fdd}fdd}tj|||_tj|||_tj|||_dS)Ncs j|S)N)r)rr)cache not_in_cacherwrxrsz3ParserElement._UnboundedCache.__init__..getcs ||<dS)Nrw)rrr)rrwrxsetsz3ParserElement._UnboundedCache.__init__..setcs jdS)N)r)r)rrwrxrsz5ParserElement._UnboundedCache.__init__..clear)rrtypes MethodTyperrr)rrrrrw)rrrxrs   z&ParserElement._UnboundedCache.__init__N)rrrrrwrwrwrx_UnboundedCachesrNc@seZdZddZdS)zParserElement._FifoCachecsht|_tfdd}fdd}fdd}tj|||_tj|||_tj|||_dS)Ncs j|S)N)r)rr)rrrwrxrsz.ParserElement._FifoCache.__init__..getcs"||<tkrjddS)NF)rpopitem)rrr)rsizerwrxrs z.ParserElement._FifoCache.__init__..setcs jdS)N)r)r)rrwrxrsz0ParserElement._FifoCache.__init__..clear)rr _OrderedDictrrrrr)rrrrrrw)rrrrxrs  
z!ParserElement._FifoCache.__init__N)rrrrrwrwrwrx _FifoCachesrc@seZdZddZdS)zParserElement._FifoCachecsvt|_itjgfdd}fdd}fdd}tj|||_tj|||_tj|||_dS)Ncs j|S)N)r)rr)rrrwrxrsz.ParserElement._FifoCache.__init__..getcs2||<tkr$jjdj|dS)N)rrpopleftr)rrr)rkey_fiforrwrxrs z.ParserElement._FifoCache.__init__..setcsjjdS)N)r)r)rrrwrxrsz0ParserElement._FifoCache.__init__..clear) rr collectionsdequerrrrr)rrrrrrw)rrrrrxrs  z!ParserElement._FifoCache.__init__N)rrrrrwrwrwrxrsrc Csd\}}|||||f}tjtj}|j|} | |jkrtj|d7<y|j||||} Wn8tk r} z|j|| j | j WYdd} ~ XqX|j|| d| dj f| Sn4tj|d7<t | t r| | d| dj fSWdQRXdS)Nrrr)rrr)r$packrat_cache_lock packrat_cacherrpackrat_cache_statsrrrrHrrrzrK) rr-rrorpZHITZMISSlookuprrrrwrwrx _parseCaches$   zParserElement._parseCachecCs(tjjdgttjtjdd<dS)Nr)r$rrrrrwrwrwrx resetCaches zParserElement.resetCachecCs8tjs4dt_|dkr tjt_n tj|t_tjt_dS)aEnables "packrat" parsing, which adds memoizing to the parsing logic. Repeated parse attempts at the same string location (which happens often in many complex grammars) can immediately return a cached value, instead of re-executing parsing/validating code. Memoizing is done of both valid results and parsing exceptions. Parameters: - cache_size_limit - (default=C{128}) - if an integer value is provided will limit the size of the packrat cache; if None is passed, then the cache size will be unbounded; if 0 is passed, the cache will be effectively disabled. This speedup may break existing programs that use parse actions that have side-effects. For this reason, packrat parsing is disabled when you first import pyparsing. To activate the packrat feature, your program must call the class method C{ParserElement.enablePackrat()}. If your program uses C{psyco} to "compile as you go", you must call C{enablePackrat} before calling C{psyco.full()}. If you do not do this, Python will crash. For best results, call C{enablePackrat()} immediately after importing pyparsing. 
Example:: import pyparsing pyparsing.ParserElement.enablePackrat() TN)r$_packratEnabledrrrrrt)Zcache_size_limitrwrwrx enablePackrats   zParserElement.enablePackratcCstj|js|jx|jD] }|jqW|js<|j}y<|j|d\}}|rv|j||}t t }|j||Wn0t k r}ztj rn|WYdd}~XnX|SdS)aB Execute the parse expression with the given string. This is the main interface to the client code, once the complete expression has been built. If you want the grammar to require that the entire input string be successfully parsed, then set C{parseAll} to True (equivalent to ending the grammar with C{L{StringEnd()}}). Note: C{parseString} implicitly calls C{expandtabs()} on the input string, in order to report proper column numbers in parse actions. If the input string contains tabs and the grammar uses parse actions that use the C{loc} argument to index into the string being parsed, you can ensure you have a consistent view of the input string by: - calling C{parseWithTabs} on your grammar before calling C{parseString} (see L{I{parseWithTabs}}) - define your parse action using the full C{(s,loc,toks)} signature, and reference the input string using the parse action's C{s} argument - explictly expand the tabs in your input string before calling C{parseString} Example:: Word('a').parseString('aaaaabaaa') # -> ['aaaaa'] Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text rN) r$rr_ streamliner]r\ expandtabsrtrr r)rverbose_stacktrace)rr-parseAllrrrZser3rwrwrx parseString#s$    zParserElement.parseStringccs@|js|jx|jD] }|jqW|js8t|j}t|}d}|j}|j}t j d} yx||kon| |kry |||} ||| dd\} } Wnt k r| d}Yq`X| |kr| d7} | | | fV|r|||} | |kr| }q|d7}n| }q`| d}q`WWn4t k r:}zt j r&n|WYdd}~XnXdS)a Scan the input string for expression matches. Each match will return the matching tokens, start location, and end location. May be called with optional C{maxMatches} argument, to clip scanning after 'n' matches are found. If C{overlap} is specified, then overlapping matches will be reported. 
Note that the start and end locations are reported relative to the string being parsed. See L{I{parseString}} for more information on parsing strings with embedded tabs. Example:: source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" print(source) for tokens,start,end in Word(alphas).scanString(source): print(' '*start + '^'*(end-start)) print(' '*start + tokens[0]) prints:: sldjf123lsdjjkf345sldkjf879lkjsfd987 ^^^^^ sldjf ^^^^^^^ lsdjjkf ^^^^^^ sldkjf ^^^^^^ lkjsfd rF)rprrN)r_rr]r\rrrrrtr$rrrr)rr- maxMatchesZoverlaprrrZ preparseFnZparseFnmatchesrZnextLocrZnextlocr3rwrwrx scanStringUsB       zParserElement.scanStringcCsg}d}d|_yxh|j|D]Z\}}}|j||||rrt|trT||j7}nt|trh||7}n |j||}qW|j||ddd|D}djtt t |St k r}zt j rȂn|WYdd}~XnXdS)af Extension to C{L{scanString}}, to modify matching text with modified tokens that may be returned from a parse action. To use C{transformString}, define a grammar and attach a parse action to it that modifies the returned token list. Invoking C{transformString()} on a target string will then scan for matches, and replace the matched text patterns according to the logic in the parse action. C{transformString()} returns the resulting transformed string. Example:: wd = Word(alphas) wd.setParseAction(lambda toks: toks[0].title()) print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) Prints:: Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. rTNcSsg|] }|r|qSrwrw)rorwrwrxrsz1ParserElement.transformString..r)r\rrrzr"rrrrvr_flattenrr$r)rr-r ZlastErvrrr3rwrwrxrs(    zParserElement.transformStringcCsPytdd|j||DStk rJ}ztjr6n|WYdd}~XnXdS)a~ Another extension to C{L{scanString}}, simplifying the access to the tokens found to match the given parse expression. May be called with optional C{maxMatches} argument, to clip searching after 'n' matches are found. 
Example:: # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters cap_word = Word(alphas.upper(), alphas.lower()) print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) prints:: ['More', 'Iron', 'Lead', 'Gold', 'I'] cSsg|]\}}}|qSrwrw)rrvrrrwrwrxrsz.ParserElement.searchString..N)r"rrr$r)rr-rr3rwrwrx searchStrings zParserElement.searchStringc csXd}d}x<|j||dD]*\}}}|||V|r>|dV|}qW||dVdS)a[ Generator method to split a string using the given expression as a separator. May be called with optional C{maxsplit} argument, to limit the number of splits; and the optional C{includeSeparators} argument (default=C{False}), if the separating matching text should be included in the split results. Example:: punc = oneOf(list(".,;:/-!?")) print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) prints:: ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] r)rN)r) rr-maxsplitZincludeSeparatorsZsplitsZlastrvrrrwrwrxrs  zParserElement.splitcCsFt|trtj|}t|ts:tjdt|tdddSt||gS)a Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement converts them to L{Literal}s by default. Example:: greet = Word(alphas) + "," + Word(alphas) + "!" hello = "Hello, World!" print (hello, "->", greet.parseString(hello)) Prints:: Hello, World! 
-> ['Hello', ',', 'World', '!'] z4Cannot combine element of type %s with ParserElementrq) stacklevelN) rzrr$rQwarningswarnr SyntaxWarningr)rrrwrwrxrs    zParserElement.__add__cCsBt|trtj|}t|ts:tjdt|tdddS||S)z] Implementation of + operator when left operand is not a C{L{ParserElement}} z4Cannot combine element of type %s with ParserElementrq)rN)rzrr$rQrrrr)rrrwrwrxrs    zParserElement.__radd__cCsLt|trtj|}t|ts:tjdt|tdddSt|tj |gS)zQ Implementation of - operator, returns C{L{And}} with error stop z4Cannot combine element of type %s with ParserElementrq)rN) rzrr$rQrrrrr _ErrorStop)rrrwrwrx__sub__s    zParserElement.__sub__cCsBt|trtj|}t|ts:tjdt|tdddS||S)z] Implementation of - operator when left operand is not a C{L{ParserElement}} z4Cannot combine element of type %s with ParserElementrq)rN)rzrr$rQrrrr)rrrwrwrx__rsub__ s    zParserElement.__rsub__cst|tr|d}}nt|tr|d dd}|ddkrHd|df}t|dtr|ddkr|ddkrvtS|ddkrtS|dtSnJt|dtrt|dtr|\}}||8}ntdt|dt|dntdt||dkr td|dkrtd||ko2dknrBtd |rfd d |r|dkrt|}ntg||}n|}n|dkr}ntg|}|S) a Implementation of * operator, allows use of C{expr * 3} in place of C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples may also include C{None} as in: - C{expr*(n,None)} or C{expr*(n,)} is equivalent to C{expr*n + L{ZeroOrMore}(expr)} (read as "at least n instances of C{expr}") - C{expr*(None,n)} is equivalent to C{expr*(0,n)} (read as "0 to n instances of C{expr}") - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)} - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)} Note that C{expr*(None,n)} does not raise an exception if more than n exprs exist in the input stream; that is, C{expr*(None,n)} does not enforce a maximum number of expr occurrences. 
If this behavior is desired, then write C{expr*(None,n) + ~expr} rNrqrrz7cannot multiply 'ParserElement' and ('%s','%s') objectsz0cannot multiply 'ParserElement' and '%s' objectsz/cannot multiply ParserElement by negative valuez@second tuple value must be greater or equal to first tuple valuez+cannot multiply ParserElement by 0 or (0,0)cs(|dkrt|dStSdS)Nrr)r)n)makeOptionalListrrwrxr]sz/ParserElement.__mul__..makeOptionalList)NN) rzrutupler2rrr ValueErrorr)rrZ minElementsZ optElementsrrw)rrrx__mul__,sD             zParserElement.__mul__cCs |j|S)N)r)rrrwrwrx__rmul__pszParserElement.__rmul__cCsFt|trtj|}t|ts:tjdt|tdddSt||gS)zI Implementation of | operator - returns C{L{MatchFirst}} z4Cannot combine element of type %s with ParserElementrq)rN) rzrr$rQrrrrr)rrrwrwrx__or__ss    zParserElement.__or__cCsBt|trtj|}t|ts:tjdt|tdddS||BS)z] Implementation of | operator when left operand is not a C{L{ParserElement}} z4Cannot combine element of type %s with ParserElementrq)rN)rzrr$rQrrrr)rrrwrwrx__ror__s    zParserElement.__ror__cCsFt|trtj|}t|ts:tjdt|tdddSt||gS)zA Implementation of ^ operator - returns C{L{Or}} z4Cannot combine element of type %s with ParserElementrq)rN) rzrr$rQrrrrr)rrrwrwrx__xor__s    zParserElement.__xor__cCsBt|trtj|}t|ts:tjdt|tdddS||AS)z] Implementation of ^ operator when left operand is not a C{L{ParserElement}} z4Cannot combine element of type %s with ParserElementrq)rN)rzrr$rQrrrr)rrrwrwrx__rxor__s    zParserElement.__rxor__cCsFt|trtj|}t|ts:tjdt|tdddSt||gS)zC Implementation of & operator - returns C{L{Each}} z4Cannot combine element of type %s with ParserElementrq)rN) rzrr$rQrrrrr )rrrwrwrx__and__s    zParserElement.__and__cCsBt|trtj|}t|ts:tjdt|tdddS||@S)z] Implementation of & operator when left operand is not a C{L{ParserElement}} z4Cannot combine element of type %s with ParserElementrq)rN)rzrr$rQrrrr)rrrwrwrx__rand__s    zParserElement.__rand__cCst|S)zE Implementation of ~ operator - returns C{L{NotAny}} )r)rrwrwrx __invert__szParserElement.__invert__cCs|dk 
r|j|S|jSdS)a  Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}. If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be passed as C{True}. If C{name} is omitted, same as calling C{L{copy}}. Example:: # these are equivalent userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") N)rmr)rrrwrwrx__call__s zParserElement.__call__cCst|S)z Suppresses the output of this C{ParserElement}; useful to keep punctuation from cluttering up returned output. )r+)rrwrwrxsuppressszParserElement.suppresscCs d|_|S)a Disables the skipping of whitespace before matching the characters in the C{ParserElement}'s defined pattern. This is normally only used internally by the pyparsing module, but may be needed in some whitespace-sensitive grammars. F)rX)rrwrwrxleaveWhitespaceszParserElement.leaveWhitespacecCsd|_||_d|_|S)z8 Overrides the default whitespace chars TF)rXrYrZ)rrOrwrwrxsetWhitespaceCharssz ParserElement.setWhitespaceCharscCs d|_|S)z Overrides default behavior to expand C{}s to spaces before parsing the input string. Must be called before C{parseString} when the input grammar contains elements that match C{} characters. T)r\)rrwrwrx parseWithTabsszParserElement.parseWithTabscCsLt|trt|}t|tr4||jkrH|jj|n|jjt|j|S)a Define expression to be ignored (e.g., comments) while doing pattern matching; may be called repeatedly, to define multiple comment or other ignorable patterns. Example:: patt = OneOrMore(Word(alphas)) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] patt.ignore(cStyleComment) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] )rzrr+r]rr)rrrwrwrxignores   zParserElement.ignorecCs"|pt|p t|ptf|_d|_|S)zT Enable display of debugging messages while doing pattern matching. 
T)r/r2r4rcr^)rZ startActionZ successActionZexceptionActionrwrwrxsetDebugActions s  zParserElement.setDebugActionscCs|r|jtttnd|_|S)a Enable display of debugging messages while doing pattern matching. Set C{flag} to True to enable, False to disable. Example:: wd = Word(alphas).setName("alphaword") integer = Word(nums).setName("numword") term = wd | integer # turn on debugging for wd wd.setDebug() OneOrMore(term).parseString("abc 123 xyz 890") prints:: Match alphaword at loc 0(1,1) Matched alphaword -> ['abc'] Match alphaword at loc 3(1,4) Exception raised:Expected alphaword (at char 4), (line:1, col:5) Match alphaword at loc 7(1,8) Matched alphaword -> ['xyz'] Match alphaword at loc 11(1,12) Exception raised:Expected alphaword (at char 12), (line:1, col:13) Match alphaword at loc 15(1,16) Exception raised:Expected alphaword (at char 15), (line:1, col:16) The output shown is that produced by the default debug actions - custom debug actions can be specified using L{setDebugActions}. Prior to attempting to match the C{wd} expression, the debugging message C{"Match at loc (,)"} is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, which makes debugging and exception messages easier to understand - for instance, the default name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. F)rr/r2r4r^)rflagrwrwrxsetDebugs#zParserElement.setDebugcCs|jS)N)r)rrwrwrxr@szParserElement.__str__cCst|S)N)r)rrwrwrxrCszParserElement.__repr__cCsd|_d|_|S)NT)r_rU)rrwrwrxrFszParserElement.streamlinecCsdS)Nrw)rrrwrwrxcheckRecursionKszParserElement.checkRecursioncCs|jgdS)zj Check defined expressions for valid structure, check for infinite recursive definitions. 
N)r)r validateTracerwrwrxvalidateNszParserElement.validatecCsy |j}Wn2tk r>t|d}|j}WdQRXYnXy |j||Stk r|}ztjrhn|WYdd}~XnXdS)z Execute the parse expression on the given file or filename. If a filename is specified (instead of a file object), the entire file is opened, read, and closed before parsing. rN)readropenrrr$r)rZfile_or_filenamerZ file_contentsfr3rwrwrx parseFileTs   zParserElement.parseFilecsHt|tr"||kp t|t|kSt|tr6|j|Stt||kSdS)N)rzr$varsrrsuper)rr)rHrwrx__eq__hs    zParserElement.__eq__cCs ||k S)Nrw)rrrwrwrx__ne__pszParserElement.__ne__cCs tt|S)N)hashid)rrwrwrx__hash__sszParserElement.__hash__cCs||kS)Nrw)rrrwrwrx__req__vszParserElement.__req__cCs ||k S)Nrw)rrrwrwrx__rne__yszParserElement.__rne__c Cs0y|jt||ddStk r*dSXdS)a Method for quick testing of a parser against a test string. Good for simple inline microtests of sub expressions while building up larger parser. Parameters: - testString - to test against this expression for a match - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests Example:: expr = Word(nums) assert expr.matches("100") )rTFN)rrr)rZ testStringrrwrwrxr|s zParserElement.matches#cCst|tr"tttj|jj}t|tr4t|}g}g}d} x|D]} |dk rb|j | dsl|rx| rx|j | qH| s~qHdj || g} g}y:| j dd} |j | |d} | j | j|d| o| } Wntk rx} zt| trdnd }d| kr0| j t| j| | j d t| j| d d |n| j d | jd || j d t| | ob|} | } WYdd} ~ XnDtk r}z&| j dt|| o|} |} WYdd}~XnX|r|r| j d tdj | |j | | fqHW| |fS)a3 Execute the parse expression on a series of test strings, showing each test, the parsed results or where the parse failed. Quick and easy way to run a parse expression against a list of sample strings. 
Parameters: - tests - a list of separate test strings, or a multiline string of test strings - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests - comment - (default=C{'#'}) - expression for indicating embedded comments in the test string; pass None to disable comment filtering - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline; if False, only dump nested list - printResults - (default=C{True}) prints test output to stdout - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing Returns: a (success, results) tuple, where success indicates that all tests succeeded (or failed if C{failureTests} is True), and the results contain a list of lines of each test's output Example:: number_expr = pyparsing_common.number.copy() result = number_expr.runTests(''' # unsigned integer 100 # negative integer -100 # float with scientific notation 6.02e23 # integer with scientific notation 1e-12 ''') print("Success" if result[0] else "Failed!") result = number_expr.runTests(''' # stray character 100Z # missing leading digit before '.' -.100 # too many '.' 3.14.159 ''', failureTests=True) print("Success" if result[0] else "Failed!") prints:: # unsigned integer 100 [100] # negative integer -100 [-100] # float with scientific notation 6.02e23 [6.02e+23] # integer with scientific notation 1e-12 [1e-12] Success # stray character 100Z ^ FAIL: Expected end of text (at char 3), (line:1, col:4) # missing leading digit before '.' -.100 ^ FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) # too many '.' 3.14.159 ^ FAIL: Expected end of text (at char 4), (line:1, col:5) Success Each test string must be on a single line. 
If you want to test a string that spans multiple lines, create a test like this:: expr.runTest(r"this is a test\n of strings that spans \n 3 lines") (Note that this is a raw string literal, you must include the leading 'r'.) TNFrz\n)r)r z(FATAL)r rr^zFAIL: zFAIL-EXCEPTION: )rzrrrvr{rrstrip splitlinesrrrrrrrrr!rGrr9rKr,)rZtestsrZcommentZfullDumpZ printResultsZ failureTestsZ allResultsZcommentssuccessrvr resultrrzr3rwrwrxrunTestssNW     $   zParserElement.runTests)F)F)T)T)TT)TT)r)F)N)T)F)T)TrTTF)OrrrrrNr staticmethodrPrRrrrirmrurrxr~rrrrrrrrrrrrrrrrrrtrrrr_MAX_INTrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr __classcell__rwrw)rHrxr$8s     &     H   " 2G+    D           )    cs eZdZdZfddZZS)r,zT Abstract C{ParserElement} subclass, for defining atomic matching patterns. cstt|jdddS)NF)rg)rr,r)r)rHrwrxr szToken.__init__)rrrrrrrwrw)rHrxr, scs eZdZdZfddZZS)r z, An empty token, will always match. cs$tt|jd|_d|_d|_dS)Nr TF)rr rrr[r`)r)rHrwrxr szEmpty.__init__)rrrrrrrwrw)rHrxr  scs*eZdZdZfddZdddZZS)rz( A token that will never match. cs*tt|jd|_d|_d|_d|_dS)NrTFzUnmatchable token)rrrrr[r`ra)r)rHrwrxr* s zNoMatch.__init__TcCst|||j|dS)N)rra)rr-rrorwrwrxr1 szNoMatch.parseImpl)T)rrrrrrrrwrw)rHrxr& s cs*eZdZdZfddZdddZZS)ra Token to exactly match a specified string. Example:: Literal('blah').parseString('blah') # -> ['blah'] Literal('blah').parseString('blahfooblah') # -> ['blah'] Literal('blah').parseString('bla') # -> Exception: Expected "blah" For case-insensitive matching, use L{CaselessLiteral}. For keyword matching (force word break before and after the matched string), use L{Keyword} or L{CaselessKeyword}. 
c stt|j||_t||_y|d|_Wn*tk rVtj dt ddt |_ YnXdt |j|_d|j|_d|_d|_dS)Nrz2null string passed to Literal; use Empty() insteadrq)rz"%s"z Expected F)rrrmatchrmatchLenfirstMatchCharrrrrr rHrrrar[r`)r matchString)rHrwrxrC s    zLiteral.__init__TcCsJ|||jkr6|jdks&|j|j|r6||j|jfSt|||j|dS)Nrr)rr startswithrrra)rr-rrorwrwrxrV szLiteral.parseImpl)T)rrrrrrrrwrw)rHrxr5 s  csLeZdZdZedZdfdd Zddd Zfd d Ze d d Z Z S)ra\ Token to exactly match a specified string as a keyword, that is, it must be immediately followed by a non-keyword character. Compare with C{L{Literal}}: - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}. - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} Accepts two optional constructor arguments in addition to the keyword string: - C{identChars} is a string of characters that would be valid identifier characters, defaulting to all alphanumerics + "_" and "$" - C{caseless} allows case-insensitive matching, default is C{False}. Example:: Keyword("start").parseString("start") # -> ['start'] Keyword("start").parseString("starting") # -> Exception For case-insensitive matching, use L{CaselessKeyword}. z_$NFc stt|j|dkrtj}||_t||_y|d|_Wn$tk r^t j dt ddYnXd|j|_ d|j |_ d|_d|_||_|r|j|_|j}t||_dS)Nrz2null string passed to Keyword; use Empty() insteadrq)rz"%s"z Expected F)rrrDEFAULT_KEYWORD_CHARSrrrrrrrrrrar[r`caselessupper caselessmatchr identChars)rrrr)rHrwrxrq s&    zKeyword.__init__TcCs|jr|||||jj|jkr|t||jksL|||jj|jkr|dksj||dj|jkr||j|jfSnv|||jkr|jdks|j|j|r|t||jks|||j|jkr|dks||d|jkr||j|jfSt |||j |dS)Nrrr) rrrrrrrrrrra)rr-rrorwrwrxr s*&zKeyword.parseImplcstt|j}tj|_|S)N)rrrrr)rr)rHrwrxr sz Keyword.copycCs |t_dS)z,Overrides the default Keyword chars N)rr)rOrwrwrxsetDefaultKeywordChars szKeyword.setDefaultKeywordChars)NF)T) rrrrr3rrrrrrrrwrw)rHrxr^ s   cs*eZdZdZfddZdddZZS)ral Token to match a specified string, ignoring case of letters. 
Note: the matched results will always be in the case of the given match string, NOT the case of the input text. Example:: OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] (Contrast with example for L{CaselessKeyword}.) cs6tt|j|j||_d|j|_d|j|_dS)Nz'%s'z Expected )rrrr returnStringrra)rr)rHrwrxr s zCaselessLiteral.__init__TcCs@||||jj|jkr,||j|jfSt|||j|dS)N)rrrrrra)rr-rrorwrwrxr szCaselessLiteral.parseImpl)T)rrrrrrrrwrw)rHrxr s  cs,eZdZdZdfdd Zd ddZZS) rz Caseless version of L{Keyword}. Example:: OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] (Contrast with example for L{CaselessLiteral}.) Ncstt|j||dddS)NT)r)rrr)rrr)rHrwrxr szCaselessKeyword.__init__TcCsj||||jj|jkrV|t||jksF|||jj|jkrV||j|jfSt|||j|dS)N)rrrrrrrra)rr-rrorwrwrxr s*zCaselessKeyword.parseImpl)N)T)rrrrrrrrwrw)rHrxr scs,eZdZdZdfdd Zd ddZZS) rlax A variation on L{Literal} which matches "close" matches, that is, strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters: - C{match_string} - string to be matched - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match The results from a successful parse will contain the matched text from the input string and the following named results: - C{mismatches} - a list of the positions within the match_string where mismatches were found - C{original} - the original match_string used to compare against the input string If C{mismatches} is an empty list, then the match was an exact match. 
Example:: patt = CloseMatch("ATCATCGAATGGA") patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) # exact match patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) # close match allowing up to 2 mismatches patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) rrcsBtt|j||_||_||_d|j|jf|_d|_d|_dS)Nz&Expected %r (with up to %d mismatches)F) rrlrr match_string maxMismatchesrar`r[)rrr)rHrwrxr szCloseMatch.__init__TcCs|}t|}|t|j}||kr|j}d}g} |j} xtt||||jD]0\}} | \} } | | krP| j|t| | krPPqPW|d}t|||g}|j|d<| |d<||fSt|||j|dS)Nrrroriginal mismatches) rrrrrrr"rra)rr-rrostartrmaxlocrZmatch_stringlocrrZs_msrcmatresultsrwrwrxr s("   zCloseMatch.parseImpl)rr)T)rrrrrrrrwrw)rHrxrl s cs8eZdZdZd fdd Zdd d Zfd d ZZS)r/a Token for matching words composed of allowed character sets. Defined with string containing all allowed initial characters, an optional string containing allowed body characters (if omitted, defaults to the initial character set), and an optional minimum, maximum, and/or exact length. The default value for C{min} is 1 (a minimum value < 1 is not valid); the default values for C{max} and C{exact} are 0, meaning no maximum or exact length restriction. An optional C{excludeChars} parameter can list characters that might be found in the input C{bodyChars} string; useful to define a word of all printables except for one or two characters, for instance. L{srange} is useful for defining custom character set strings for defining C{Word} expressions, using range notation from regular expression character sets. A common mistake is to use C{Word} to match a specific literal string, as in C{Word("Address")}. 
Remember that C{Word} uses the string argument to define I{sets} of matchable characters. This expression would match "Add", "AAA", "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an exact literal string, use L{Literal} or L{Keyword}. pyparsing includes helper strings for building Words: - L{alphas} - L{nums} - L{alphanums} - L{hexnums} - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.) - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.) - L{printables} (any non-whitespace character) Example:: # a word composed of digits integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) # a word with a leading capital, and zero or more lowercase capital_word = Word(alphas.upper(), alphas.lower()) # hostnames are alphanumeric, with leading alpha, and '-' hostname = Word(alphas, alphanums+'-') # roman numeral (not a strict parser, accepts invalid mix of characters) roman = Word("IVXLCDM") # any string of non-whitespace characters, except for ',' csv_value = Word(printables, excludeChars=",") NrrrFc stt|jrFdjfdd|D}|rFdjfdd|D}||_t||_|rl||_t||_n||_t||_|dk|_ |dkrt d||_ |dkr||_ nt |_ |dkr||_ ||_ t||_d|j|_d |_||_d |j|jkr|dkr|dkr|dkr|j|jkr8d t|j|_nHt|jdkrfd tj|jt|jf|_nd t|jt|jf|_|jrd|jd|_ytj|j|_Wntk rd|_YnXdS)Nrc3s|]}|kr|VqdS)Nrw)rr) excludeCharsrwrxr7 sz Word.__init__..c3s|]}|kr|VqdS)Nrw)rr)rrwrxr9 srrrzZcannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permittedz Expected Frz[%s]+z%s[%s]*z [%s][%s]*z\b)rr/rr initCharsOrigr initChars bodyCharsOrig bodyChars maxSpecifiedrminLenmaxLenrrrrar` asKeyword_escapeRegexRangeCharsreStringrrdescapecompilerK)rrrminmaxexactrr)rH)rrxr4 sT      0 z Word.__init__Tc CsD|jr<|jj||}|s(t|||j||j}||jfS|||jkrZt|||j||}|d7}t|}|j}||j }t ||}x ||kr|||kr|d7}qWd} |||j krd} |j r||kr|||krd} |j r|dkr||d|ks||kr|||krd} | r4t|||j|||||fS)NrrFTr)rdrrraendgrouprrrrr 
rrr) rr-rrorrrZ bodycharsrZthrowExceptionrwrwrxrj s6    4zWord.parseImplc stytt|jStk r"YnX|jdkrndd}|j|jkr^d||j||jf|_nd||j|_|jS)NcSs$t|dkr|dddS|SdS)Nz...)r)rrwrwrx charsAsStr s z Word.__str__..charsAsStrz W:(%s,%s)zW:(%s))rr/rrKrUrr)rr)rHrwrxr s  z Word.__str__)NrrrrFN)T)rrrrrrrrrwrw)rHrxr/ s.6 #csFeZdZdZeejdZd fdd Zd ddZ fd d Z Z S) r'a Token for matching strings that match a given regular expression. Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. If the given regex contains named groups (defined using C{(?P...)}), these will be preserved as named parse results. Example:: realnum = Regex(r"[+-]?\d+\.\d*") date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)') # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") z[A-Z]rc stt|jt|tr|s,tjdtdd||_||_ yt j |j|j |_ |j|_ Wqt jk rtjd|tddYqXn2t|tjr||_ t||_|_ ||_ ntdt||_d|j|_d|_d|_d S) zThe parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.z0null string passed to Regex; use Empty() insteadrq)rz$invalid pattern (%s) passed to RegexzCRegex may only be constructed with a string or a compiled RE objectz Expected FTN)rr'rrzrrrrpatternflagsrdr r sre_constantserrorcompiledREtyper{rrrrar`r[)rrr)rHrwrxr s.         zRegex.__init__TcCsd|jj||}|s"t|||j||j}|j}t|j}|r\x|D]}||||<qHW||fS)N)rdrrrar groupdictr"r)rr-rrordrrrwrwrxr s  zRegex.parseImplc sDytt|jStk r"YnX|jdkr>dt|j|_|jS)NzRe:(%s))rr'rrKrUrr)r)rHrwrxr s z Regex.__str__)r)T) rrrrrrdr rrrrrrwrw)rHrxr' s  " cs8eZdZdZd fdd Zd ddZfd d ZZS) r%a Token for matching strings that are delimited by quoting characters. 
Defined with the following parameters: - quoteChar - string of one or more characters defining the quote delimiting string - escChar - character to escape quotes, typically backslash (default=C{None}) - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None}) - multiline - boolean indicating whether quotes can span multiple lines (default=C{False}) - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True}) - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar) - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True}) Example:: qs = QuotedString('"') print(qs.searchString('lsjdf "This is the quote" sldjf')) complex_qs = QuotedString('{{', endQuoteChar='}}') print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) sql_qs = QuotedString('"', escQuote='""') print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) prints:: [['This is the quote']] [['This is the "quote"']] [['This is the quote with "embedded" quotes']] NFTc sNttj|j}|s0tjdtddt|dkr>|}n"|j}|s`tjdtddt|_t |_ |d_ |_ t |_ |_|_|_|_|rtjtjB_dtjjtj d|dk rt|pdf_n.)z|(?:%s)z|(?:%s.)z(.)z)*%sz$invalid pattern (%s) passed to Regexz Expected FTrs)%rr%rrrrr SyntaxError quoteCharr quoteCharLenfirstQuoteCharrendQuoteCharLenescCharescQuoteunquoteResultsconvertWhitespaceEscapesrd MULTILINEDOTALLrr rrrrescCharReplacePatternr rrrrrrar`r[)rrr r!Z multiliner"rr#)rH)rrxr sf       6     zQuotedString.__init__c Cs|||jkr|jj||pd}|s4t|||j||j}|j}|jr||j|j }t |t rd|kr|j rddddd}x |j D]\}}|j||}qW|jrtj|jd|}|jr|j|j|j}||fS)N\ r  )z\tz\nz\fz\rz\g<1>)rrdrrrarrr"rrrzrr#rrr rr&r!r) rr-rrorrZws_mapZwslitZwscharrwrwrxrG s(  zQuotedString.parseImplc sFytt|jStk r"YnX|jdkr@d|j|jf|_|jS)Nz.quoted string, starting with %s ending with %s)rr%rrKrUrr)r)rHrwrxrj s 
zQuotedString.__str__)NNFTNT)T)rrrrrrrrrwrw)rHrxr% sA #cs8eZdZdZd fdd Zd ddZfd d ZZS) r a Token for matching words composed of characters I{not} in a given set (will include whitespace in matched characters if not listed in the provided exclusion set - see example). Defined with string containing all disallowed characters, and an optional minimum, maximum, and/or exact length. The default value for C{min} is 1 (a minimum value < 1 is not valid); the default values for C{max} and C{exact} are 0, meaning no maximum or exact length restriction. Example:: # define a comma-separated-value as anything that is not a ',' csv_value = CharsNotIn(',') print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) prints:: ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] rrrcstt|jd|_||_|dkr*td||_|dkr@||_nt|_|dkrZ||_||_t ||_ d|j |_ |jdk|_ d|_ dS)NFrrzfcannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permittedrz Expected )rr rrXnotCharsrrrrrrrar[r`)rr+r r r )rHrwrxr s    zCharsNotIn.__init__TcCs|||jkrt|||j||}|d7}|j}t||jt|}x ||krd|||krd|d7}qFW|||jkrt|||j|||||fS)Nrr)r+rrar rrr)rr-rrorZnotcharsmaxlenrwrwrxr s   zCharsNotIn.parseImplc sdytt|jStk r"YnX|jdkr^t|jdkrRd|jdd|_n d|j|_|jS)Nrz !W:(%s...)z!W:(%s))rr rrKrUrr+)r)rHrwrxr s  zCharsNotIn.__str__)rrrr)T)rrrrrrrrrwrw)rHrxr v s cs<eZdZdZddddddZdfd d ZdddZZS)r.a Special matching class for matching whitespace. Normally, whitespace is ignored by pyparsing grammars. This class is included when some whitespace structures are significant. Define with a string containing the whitespace characters to be matched; default is C{" \t\r\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments, as defined for the C{L{Word}} class. zzzzz)rr(rr*r) rrrcsttj|_jdjfddjDdjddjD_d_dj_ |_ |dkrt|_ nt _ |dkr|_ |_ dS)Nrc3s|]}|jkr|VqdS)N) matchWhite)rr)rrwrxr sz!White.__init__..css|]}tj|VqdS)N)r. 
whiteStrs)rrrwrwrxr sTz Expected r) rr.rr.rrrYrr[rarrr)rZwsr r r )rH)rrxr s  zWhite.__init__TcCs|||jkrt|||j||}|d7}||j}t|t|}x"||krd|||jkrd|d7}qDW|||jkrt|||j|||||fS)Nrr)r.rrarr rr)rr-rrorrrwrwrxr s  zWhite.parseImpl)r-rrrr)T)rrrrr/rrrrwrw)rHrxr. scseZdZfddZZS)_PositionTokencs(tt|j|jj|_d|_d|_dS)NTF)rr0rrHrrr[r`)r)rHrwrxr s z_PositionToken.__init__)rrrrrrwrw)rHrxr0 sr0cs2eZdZdZfddZddZd ddZZS) rzb Token to advance to a specific column of input text; useful for tabular report scraping. cstt|j||_dS)N)rrrr9)rcolno)rHrwrxr szGoToColumn.__init__cCs`t|||jkr\t|}|jr*|j||}x0||krZ||jrZt|||jkrZ|d7}q,W|S)Nrr)r9rr]risspace)rr-rrrwrwrxr s & zGoToColumn.preParseTcCsDt||}||jkr"t||d|||j|}|||}||fS)NzText not in expected column)r9r)rr-rroZthiscolZnewlocrrwrwrxr s    zGoToColumn.parseImpl)T)rrrrrrrrrwrw)rHrxr s  cs*eZdZdZfddZdddZZS)ra Matches if current position is at the beginning of a line within the parse string Example:: test = ''' AAA this line AAA and this line AAA but not this one B AAA and definitely not this one ''' for t in (LineStart() + 'AAA' + restOfLine).searchString(test): print(t) Prints:: ['AAA', ' this line'] ['AAA', ' and this line'] cstt|jd|_dS)NzExpected start of line)rrrra)r)rHrwrxr& szLineStart.__init__TcCs*t||dkr|gfSt|||j|dS)Nrr)r9rra)rr-rrorwrwrxr* szLineStart.parseImpl)T)rrrrrrrrwrw)rHrxr s cs*eZdZdZfddZdddZZS)rzU Matches if current position is at the end of a line within the parse string cs,tt|j|jtjjddd|_dS)NrrzExpected end of line)rrrrr$rNrra)r)rHrwrxr3 szLineEnd.__init__TcCsb|t|kr6||dkr$|ddfSt|||j|n(|t|krN|dgfSt|||j|dS)Nrrr)rrra)rr-rrorwrwrxr8 s     zLineEnd.parseImpl)T)rrrrrrrrwrw)rHrxr/ s cs*eZdZdZfddZdddZZS)r*zM Matches if current position is at the beginning of the parse string cstt|jd|_dS)NzExpected start of text)rr*rra)r)rHrwrxrG szStringStart.__init__TcCs0|dkr(||j|dkr(t|||j||gfS)Nr)rrra)rr-rrorwrwrxrK szStringStart.parseImpl)T)rrrrrrrrwrw)rHrxr*C s cs*eZdZdZfddZdddZZS)r)zG Matches if current position is at the end of the parse string 
cstt|jd|_dS)NzExpected end of text)rr)rra)r)rHrwrxrV szStringEnd.__init__TcCs^|t|krt|||j|n<|t|kr6|dgfS|t|krJ|gfSt|||j|dS)Nrr)rrra)rr-rrorwrwrxrZ s    zStringEnd.parseImpl)T)rrrrrrrrwrw)rHrxr)R s cs.eZdZdZeffdd ZdddZZS)r1ap Matches if the current position is at the beginning of a Word, and is not preceded by any character in a given set of C{wordChars} (default=C{printables}). To emulate the C{} behavior of regular expressions, use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of the string being parsed, or at the beginning of a line. cs"tt|jt||_d|_dS)NzNot at the start of a word)rr1rr wordCharsra)rr3)rHrwrxrl s zWordStart.__init__TcCs@|dkr8||d|jks(|||jkr8t|||j||gfS)Nrrr)r3rra)rr-rrorwrwrxrq s zWordStart.parseImpl)T)rrrrrVrrrrwrw)rHrxr1d scs.eZdZdZeffdd ZdddZZS)r0aZ Matches if the current position is at the end of a Word, and is not followed by any character in a given set of C{wordChars} (default=C{printables}). To emulate the C{} behavior of regular expressions, use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of the string being parsed, or at the end of a line. cs(tt|jt||_d|_d|_dS)NFzNot at the end of a word)rr0rrr3rXra)rr3)rHrwrxr s zWordEnd.__init__TcCsPt|}|dkrH||krH|||jks8||d|jkrHt|||j||gfS)Nrrr)rr3rra)rr-rrorrwrwrxr s zWordEnd.parseImpl)T)rrrrrVrrrrwrw)rHrxr0x scseZdZdZdfdd ZddZddZd d Zfd d Zfd dZ fddZ dfdd Z gfddZ fddZ ZS)r z^ Abstract subclass of ParserElement, for combining and post-processing parsed tokens. 
Fc stt|j|t|tr"t|}t|tr.F)rr rrzrrrr$rQexprsrIterableallrvrre)rr4rg)rHrwrxr s     zParseExpression.__init__cCs |j|S)N)r4)rrrwrwrxr szParseExpression.__getitem__cCs|jj|d|_|S)N)r4rrU)rrrwrwrxr s zParseExpression.appendcCs4d|_dd|jD|_x|jD] }|jq W|S)z~Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on all contained expressions.FcSsg|] }|jqSrw)r)rrrwrwrxr sz3ParseExpression.leaveWhitespace..)rXr4r)rrrwrwrxr s   zParseExpression.leaveWhitespacecszt|trF||jkrvtt|j|xP|jD]}|j|jdq,Wn0tt|j|x|jD]}|j|jdq^W|S)Nrrrsrs)rzr+r]rr rr4)rrr)rHrwrxr s    zParseExpression.ignorec sLytt|jStk r"YnX|jdkrFd|jjt|jf|_|jS)Nz%s:(%s)) rr rrKrUrHrrr4)r)rHrwrxr s zParseExpression.__str__cs0tt|jx|jD] }|jqWt|jdkr|jd}t||jr|j r|jdkr|j r|jdd|jdg|_d|_ |j |j O_ |j |j O_ |jd}t||jo|j o|jdko|j r|jdd|jdd|_d|_ |j |j O_ |j |j O_ dt ||_|S)Nrqrrrz Expected rsrs)rr rr4rrzrHrSrVr^rUr[r`rra)rrr)rHrwrxr s0         zParseExpression.streamlinecstt|j||}|S)N)rr rm)rrrlr)rHrwrxrm szParseExpression.setResultsNamecCs:|dd|g}x|jD]}|j|qW|jgdS)N)r4rr)rrtmprrwrwrxr s zParseExpression.validatecs$tt|j}dd|jD|_|S)NcSsg|] }|jqSrw)r)rrrwrwrxr sz(ParseExpression.copy..)rr rr4)rr)rHrwrxr szParseExpression.copy)F)F)rrrrrrrrrrrrmrrrrwrw)rHrxr s " csTeZdZdZGdddeZdfdd ZdddZd d Zd d Z d dZ Z S)ra  Requires all given C{ParseExpression}s to be found in the given order. Expressions may be separated by whitespace. May be constructed using the C{'+'} operator. May also be constructed using the C{'-'} operator, which will suppress backtracking. 
Example:: integer = Word(nums) name_expr = OneOrMore(Word(alphas)) expr = And([integer("id"),name_expr("name"),integer("age")]) # more easily written as: expr = integer("id") + name_expr("name") + integer("age") cseZdZfddZZS)zAnd._ErrorStopcs&ttj|j||d|_|jdS)N-)rrrrrr)rrr)rHrwrxr szAnd._ErrorStop.__init__)rrrrrrwrw)rHrxr srTcsRtt|j||tdd|jD|_|j|jdj|jdj|_d|_ dS)Ncss|] }|jVqdS)N)r[)rrrwrwrxr szAnd.__init__..rT) rrrr6r4r[rrYrXre)rr4rg)rHrwrxr s z And.__init__c Cs|jdj|||dd\}}d}x|jddD]}t|tjrFd}q0|ry|j|||\}}Wqtk rvYqtk r}zd|_tj|WYdd}~Xqt k rt|t ||j |YqXn|j|||\}}|s|j r0||7}q0W||fS)NrF)rprrT) r4rtrzrrr#r __traceback__rrrrar) rr-rro resultlistZ errorStoprZ exprtokensrrwrwrxr s(   z And.parseImplcCst|trtj|}|j|S)N)rzrr$rQr)rrrwrwrxr5 s  z And.__iadd__cCs8|dd|g}x |jD]}|j||jsPqWdS)N)r4rr[)rrsubRecCheckListrrwrwrxr: s   zAnd.checkRecursioncCs@t|dr|jS|jdkr:ddjdd|jDd|_|jS)Nr{rcss|]}t|VqdS)N)r)rrrwrwrxrF szAnd.__str__..})rrrUrr4)rrwrwrxrA s    z And.__str__)T)T) rrrrr rrrrrrrrwrw)rHrxr s csDeZdZdZdfdd ZdddZdd Zd d Zd d ZZ S)ra Requires that at least one C{ParseExpression} is found. If two expressions match, the expression that matches the longest string will be used. May be constructed using the C{'^'} operator. Example:: # construct Or using '^' operator number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) print(number.searchString("123 3.1416 789")) prints:: [['123'], ['3.1416'], ['789']] Fcs:tt|j|||jr0tdd|jD|_nd|_dS)Ncss|] }|jVqdS)N)r[)rrrwrwrxr\ szOr.__init__..T)rrrr4rr[)rr4rg)rHrwrxrY sz Or.__init__Tc CsTd}d}g}x|jD]}y|j||}Wnvtk rd} z d| _| j|krT| }| j}WYdd} ~ Xqtk rt||krt|t||j|}t|}YqX|j||fqW|r*|j dddx`|D]X\} }y|j |||Stk r$} z"d| _| j|kr| }| j}WYdd} ~ XqXqW|dk rB|j|_ |nt||d|dS)NrrcSs |d S)Nrrw)xrwrwrxryu szOr.parseImpl..)rz no defined alternatives to matchrs) r4rrr9rrrrarsortrtr) rr-rro maxExcLoc maxExceptionrrZloc2r_rwrwrxr` s<     z Or.parseImplcCst|trtj|}|j|S)N)rzrr$rQr)rrrwrwrx__ixor__ s  z Or.__ixor__cCs@t|dr|jS|jdkr:ddjdd|jDd|_|jS)Nrr<z ^ css|]}t|VqdS)N)r)rrrwrwrxr szOr.__str__..r=)rrrUrr4)rrwrwrxr s    z Or.__str__cCs0|dd|g}x|jD]}|j|qWdS)N)r4r)rrr;rrwrwrxr s zOr.checkRecursion)F)T) rrrrrrrCrrrrwrw)rHrxrK s   & csDeZdZdZdfdd ZdddZdd Zd d Zd d ZZ S)ra Requires that at least one C{ParseExpression} is found. If two expressions match, the first one listed is the one that will match. May be constructed using the C{'|'} operator. Example:: # construct MatchFirst using '|' operator # watch the order of expressions to match number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] # put more selective expression first number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] Fcs:tt|j|||jr0tdd|jD|_nd|_dS)Ncss|] }|jVqdS)N)r[)rrrwrwrxr sz&MatchFirst.__init__..T)rrrr4rr[)rr4rg)rHrwrxr szMatchFirst.__init__Tc Csd}d}x|jD]}y|j|||}|Stk r\}z|j|krL|}|j}WYdd}~Xqtk rt||krt|t||j|}t|}YqXqW|dk r|j|_|nt||d|dS)Nrrz no defined alternatives to matchrs)r4rtrrrrrar) rr-rror@rArrrrwrwrxr s$   zMatchFirst.parseImplcCst|trtj|}|j|S)N)rzrr$rQr)rrrwrwrx__ior__ s  zMatchFirst.__ior__cCs@t|dr|jS|jdkr:ddjdd|jDd|_|jS)Nrr<z | css|]}t|VqdS)N)r)rrrwrwrxr sz%MatchFirst.__str__..r=)rrrUrr4)rrwrwrxr s    zMatchFirst.__str__cCs0|dd|g}x|jD]}|j|qWdS)N)r4r)rrr;rrwrwrxr s zMatchFirst.checkRecursion)F)T) rrrrrrrDrrrrwrw)rHrxr s   cs<eZdZdZd fdd Zd ddZddZd d ZZS) r am Requires all given C{ParseExpression}s to be found, but in any order. Expressions may be separated by whitespace. May be constructed using the C{'&'} operator. Example:: color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") integer = Word(nums) shape_attr = "shape:" + shape_type("shape") posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") color_attr = "color:" + color("color") size_attr = "size:" + integer("size") # use Each (using operator '&') to accept attributes in any order # (shape and posn are required, color and size are optional) shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) shape_spec.runTests(''' shape: SQUARE color: BLACK posn: 100, 120 shape: CIRCLE size: 50 color: BLUE posn: 50,80 color:GREEN size:20 shape:TRIANGLE posn:20,40 ''' ) prints:: shape: SQUARE color: BLACK posn: 100, 120 ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - color: BLACK - posn: ['100', ',', '120'] - x: 100 - y: 120 - shape: SQUARE shape: CIRCLE size: 50 color: BLUE posn: 50,80 ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - 
color: BLUE - posn: ['50', ',', '80'] - x: 50 - y: 80 - shape: CIRCLE - size: 50 color: GREEN size: 20 shape: TRIANGLE posn: 20,40 ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - color: GREEN - posn: ['20', ',', '40'] - x: 20 - y: 40 - shape: TRIANGLE - size: 20 Tcs8tt|j||tdd|jD|_d|_d|_dS)Ncss|] }|jVqdS)N)r[)rrrwrwrxrsz Each.__init__..T)rr rr6r4r[rXinitExprGroups)rr4rg)rHrwrxrsz Each.__init__c s|jrtdd|jD|_dd|jD}dd|jD}|||_dd|jD|_dd|jD|_dd|jD|_|j|j7_d |_|}|jdd}|jddg}d } x| rp||j|j} g} x~| D]v} y| j||}Wn t k r| j | YqX|j |jj t | | | |krD|j | q| krj | qWt| t| krd } qW|rd jd d|D} t ||d | |fdd|jD7}g}x*|D]"} | j|||\}}|j |qWt|tg}||fS)Ncss&|]}t|trt|j|fVqdS)N)rzrrr.)rrrwrwrxrsz!Each.parseImpl..cSsg|]}t|tr|jqSrw)rzrr.)rrrwrwrxrsz"Each.parseImpl..cSs"g|]}|jrt|t r|qSrw)r[rzr)rrrwrwrxrscSsg|]}t|tr|jqSrw)rzr2r.)rrrwrwrxr scSsg|]}t|tr|jqSrw)rzrr.)rrrwrwrxr!scSs g|]}t|tttfs|qSrw)rzrr2r)rrrwrwrxr"sFTz, css|]}t|VqdS)N)r)rrrwrwrxr=sz*Missing one or more required elements (%s)cs$g|]}t|tr|jkr|qSrw)rzrr.)rr)tmpOptrwrxrAs)rErr4Zopt1mapZ optionalsZmultioptionalsZ multirequiredZrequiredrrrrrremoverrrtsumr")rr-rroZopt1Zopt2ZtmpLocZtmpReqdZ matchOrderZ keepMatchingZtmpExprsZfailedrZmissingr:rZ finalResultsrw)rFrxrsP     zEach.parseImplcCs@t|dr|jS|jdkr:ddjdd|jDd|_|jS)Nrr<z & css|]}t|VqdS)N)r)rrrwrwrxrPszEach.__str__..r=)rrrUrr4)rrwrwrxrKs    z Each.__str__cCs0|dd|g}x|jD]}|j|qWdS)N)r4r)rrr;rrwrwrxrTs zEach.checkRecursion)T)T) rrrrrrrrrrwrw)rHrxr s 5 1 csleZdZdZdfdd ZdddZdd Zfd d Zfd d ZddZ gfddZ fddZ Z S)rza Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens. 
Fcstt|j|t|tr@ttjtr2tj|}ntjt |}||_ d|_ |dk r|j |_ |j |_ |j|j|j|_|j|_|j|_|jj|jdS)N)rrrrzr issubclassr$rQr,rr.rUr`r[rrYrXrWrer]r)rr.rg)rHrwrxr^s    zParseElementEnhance.__init__TcCs2|jdk r|jj|||ddStd||j|dS)NF)rpr)r.rtrra)rr-rrorwrwrxrps zParseElementEnhance.parseImplcCs*d|_|jj|_|jdk r&|jj|S)NF)rXr.rr)rrwrwrxrvs    z#ParseElementEnhance.leaveWhitespacecsrt|trB||jkrntt|j||jdk rn|jj|jdn,tt|j||jdk rn|jj|jd|S)Nrrrsrs)rzr+r]rrrr.)rr)rHrwrxr}s    zParseElementEnhance.ignorecs&tt|j|jdk r"|jj|S)N)rrrr.)r)rHrwrxrs  zParseElementEnhance.streamlinecCsB||krt||g|dd|g}|jdk r>|jj|dS)N)r&r.r)rrr;rwrwrxrs  z"ParseElementEnhance.checkRecursioncCs6|dd|g}|jdk r(|jj||jgdS)N)r.rr)rrr7rwrwrxrs  zParseElementEnhance.validatec sVytt|jStk r"YnX|jdkrP|jdk rPd|jjt|jf|_|jS)Nz%s:(%s)) rrrrKrUr.rHrr)r)rHrwrxrszParseElementEnhance.__str__)F)T) rrrrrrrrrrrrrrwrw)rHrxrZs   cs*eZdZdZfddZdddZZS)ra Lookahead matching of the given parse expression. C{FollowedBy} does I{not} advance the parsing position within the input string, it only verifies that the specified parse expression matches at the current position. C{FollowedBy} always returns a null token list. Example:: # use FollowedBy to match a label only if it is followed by a ':' data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() prints:: [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] cstt|j|d|_dS)NT)rrrr[)rr.)rHrwrxrszFollowedBy.__init__TcCs|jj|||gfS)N)r.r)rr-rrorwrwrxrszFollowedBy.parseImpl)T)rrrrrrrrwrw)rHrxrs cs2eZdZdZfddZd ddZddZZS) ra Lookahead to disallow matching with the given parse expression. C{NotAny} does I{not} advance the parsing position within the input string, it only verifies that the specified parse expression does I{not} match at the current position. Also, C{NotAny} does I{not} skip over leading whitespace. 
C{NotAny} always returns a null token list. May be constructed using the '~' operator. Example:: cs0tt|j|d|_d|_dt|j|_dS)NFTzFound unwanted token, )rrrrXr[rr.ra)rr.)rHrwrxrszNotAny.__init__TcCs&|jj||rt|||j||gfS)N)r.rrra)rr-rrorwrwrxrszNotAny.parseImplcCs4t|dr|jS|jdkr.dt|jd|_|jS)Nrz~{r=)rrrUrr.)rrwrwrxrs   zNotAny.__str__)T)rrrrrrrrrwrw)rHrxrs   cs(eZdZdfdd ZdddZZS) _MultipleMatchNcsFtt|j|d|_|}t|tr.tj|}|dk r<|nd|_dS)NT) rrJrrWrzrr$rQ not_ender)rr.stopOnZender)rHrwrxrs   z_MultipleMatch.__init__Tc Cs|jj}|j}|jdk }|r$|jj}|r2|||||||dd\}}yZ|j } xJ|rb|||| rr|||} n|} ||| |\}} | s| jrT|| 7}qTWWnttfk rYnX||fS)NF)rp) r.rtrrKrr]rrr) rr-rroZself_expr_parseZself_skip_ignorablesZ check_enderZ try_not_enderrZhasIgnoreExprsrZ tmptokensrwrwrxrs,      z_MultipleMatch.parseImpl)N)T)rrrrrrrwrw)rHrxrJsrJc@seZdZdZddZdS)ra Repetition of one or more of the given expression. Parameters: - expr - expression that must match one or more times - stopOn - (default=C{None}) - expression for a terminating sentinel (only required if the sentinel would ordinarily match the repetition expression) Example:: data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) text = "shape: SQUARE posn: upper left color: BLACK" OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] # use stopOn attribute for OneOrMore to avoid reading label string as part of the data attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] # could also be written as (attr_expr * (1,)).parseString(text).pprint() cCs4t|dr|jS|jdkr.dt|jd|_|jS)Nrr<z}...)rrrUrr.)rrwrwrxr!s   zOneOrMore.__str__N)rrrrrrwrwrwrxrscs8eZdZdZd fdd Zd fdd Zdd ZZS) r2aw Optional repetition of zero or more of the given expression. 
Parameters: - expr - expression that must match zero or more times - stopOn - (default=C{None}) - expression for a terminating sentinel (only required if the sentinel would ordinarily match the repetition expression) Example: similar to L{OneOrMore} Ncstt|j||dd|_dS)N)rLT)rr2rr[)rr.rL)rHrwrxr6szZeroOrMore.__init__Tc s6ytt|j|||Sttfk r0|gfSXdS)N)rr2rrr)rr-rro)rHrwrxr:szZeroOrMore.parseImplcCs4t|dr|jS|jdkr.dt|jd|_|jS)Nrrz]...)rrrUrr.)rrwrwrxr@s   zZeroOrMore.__str__)N)T)rrrrrrrrrwrw)rHrxr2*s c@s eZdZddZeZddZdS) _NullTokencCsdS)NFrw)rrwrwrxrJsz_NullToken.__bool__cCsdS)Nrrw)rrwrwrxrMsz_NullToken.__str__N)rrrrr'rrwrwrwrxrMIsrMcs6eZdZdZeffdd Zd ddZddZZS) raa Optional matching of the given expression. Parameters: - expr - expression that must match zero or more times - default (optional) - value to be returned if the optional expression is not found. Example:: # US postal code can be a 5-digit zip, plus optional 4-digit qualifier zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) zip.runTests(''' # traditional ZIP code 12345 # ZIP+4 form 12101-0001 # invalid ZIP 98765- ''') prints:: # traditional ZIP code 12345 ['12345'] # ZIP+4 form 12101-0001 ['12101-0001'] # invalid ZIP 98765- ^ FAIL: Expected end of text (at char 5), (line:1, col:6) cs.tt|j|dd|jj|_||_d|_dS)NF)rgT)rrrr.rWrr[)rr.r)rHrwrxrts zOptional.__init__Tc Cszy|jj|||dd\}}WnTttfk rp|jtk rh|jjr^t|jg}|j||jj<ql|jg}ng}YnX||fS)NF)rp)r.rtrrr_optionalNotMatchedrVr")rr-rrorrwrwrxrzs    zOptional.parseImplcCs4t|dr|jS|jdkr.dt|jd|_|jS)Nrrr )rrrUrr.)rrwrwrxrs   zOptional.__str__)T) rrrrrNrrrrrwrw)rHrxrQs" cs,eZdZdZd fdd Zd ddZZS) r(a Token for skipping over all undefined text until the matched expression is found. Parameters: - expr - target expression marking the end of the data to be skipped - include - (default=C{False}) if True, the target expression is also parsed (the skipped text and target expression are returned as a 2-element list). 
- ignore - (default=C{None}) used to define grammars (typically quoted strings and comments) that might contain false matches to the target expression - failOn - (default=C{None}) define expressions that are not allowed to be included in the skipped test; if found before the target expression is found, the SkipTo is not a match Example:: report = ''' Outstanding Issues Report - 1 Jan 2000 # | Severity | Description | Days Open -----+----------+-------------------------------------------+----------- 101 | Critical | Intermittent system crash | 6 94 | Cosmetic | Spelling error on Login ('log|n') | 14 79 | Minor | System slow when running too many reports | 47 ''' integer = Word(nums) SEP = Suppress('|') # use SkipTo to simply match everything up until the next SEP # - ignore quoted strings, so that a '|' character inside a quoted string does not match # - parse action will call token.strip() for each matched token, i.e., the description body string_data = SkipTo(SEP, ignore=quotedString) string_data.setParseAction(tokenMap(str.strip)) ticket_expr = (integer("issue_num") + SEP + string_data("sev") + SEP + string_data("desc") + SEP + integer("days_open")) for tkt in ticket_expr.searchString(report): print tkt.dump() prints:: ['101', 'Critical', 'Intermittent system crash', '6'] - days_open: 6 - desc: Intermittent system crash - issue_num: 101 - sev: Critical ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - days_open: 14 - desc: Spelling error on Login ('log|n') - issue_num: 94 - sev: Cosmetic ['79', 'Minor', 'System slow when running too many reports', '47'] - days_open: 47 - desc: System slow when running too many reports - issue_num: 79 - sev: Minor FNcs`tt|j|||_d|_d|_||_d|_t|t rFt j ||_ n||_ dt |j|_dS)NTFzNo match found for )rr(r ignoreExprr[r` includeMatchrrzrr$rQfailOnrr.ra)rrincluderrQ)rHrwrxrs zSkipTo.__init__Tc Cs,|}t|}|j}|jj}|jdk r,|jjnd}|jdk rB|jjnd} |} x| |kr|dk rh||| rhP| dk rx*y| || } Wqrtk rPYqrXqrWy||| dddWn tt fk r| d7} YqLXPqLWt|||j || 
}|||} t | } |j r$||||dd\}} | | 7} || fS)NF)rorprr)rp) rr.rtrQrrOrrrrrar"rP)rr-rror0rr.Z expr_parseZself_failOn_canParseNextZself_ignoreExpr_tryParseZtmplocZskiptextZ skipresultrrwrwrxrs<    zSkipTo.parseImpl)FNN)T)rrrrrrrrwrw)rHrxr(s6 csbeZdZdZdfdd ZddZddZd d Zd d Zgfd dZ ddZ fddZ Z S)raK Forward declaration of an expression to be defined later - used for recursive grammars, such as algebraic infix notation. When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. Note: take care when assigning to C{Forward} not to overlook precedence of operators. Specifically, '|' has a lower precedence than '<<', so that:: fwdExpr << a | b | c will actually be evaluated as:: (fwdExpr << a) | b | c thereby leaving b and c out as parseable alternatives. It is recommended that you explicitly group the values inserted into the C{Forward}:: fwdExpr << (a | b | c) Converting to use the '<<=' operator instead will avoid this problem. See L{ParseResults.pprint} for an example of a recursive parser created using C{Forward}. Ncstt|j|dddS)NF)rg)rrr)rr)rHrwrxrszForward.__init__cCsjt|trtj|}||_d|_|jj|_|jj|_|j|jj |jj |_ |jj |_ |j j |jj |S)N)rzrr$rQr.rUr`r[rrYrXrWr]r)rrrwrwrx __lshift__s      zForward.__lshift__cCs||>S)Nrw)rrrwrwrx __ilshift__'szForward.__ilshift__cCs d|_|S)NF)rX)rrwrwrxr*szForward.leaveWhitespacecCs$|js d|_|jdk r |jj|S)NT)r_r.r)rrwrwrxr.s   zForward.streamlinecCs>||kr0|dd|g}|jdk r0|jj||jgdS)N)r.rr)rrr7rwrwrxr5s   zForward.validatec Cs>t|dr|jS|jjdSd}Wd|j|_X|jjd|S)Nrz: ...Nonez: )rrrHrZ _revertClass_ForwardNoRecurser.r)rZ retStringrwrwrxr<s   zForward.__str__cs.|jdk rtt|jSt}||K}|SdS)N)r.rrr)rr)rHrwrxrMs  z Forward.copy)N) rrrrrrSrTrrrrrrrwrw)rHrxrs  c@seZdZddZdS)rVcCsdS)Nz...rw)rrwrwrxrVsz_ForwardNoRecurse.__str__N)rrrrrwrwrwrxrVUsrVcs"eZdZdZdfdd ZZS)r-zQ Abstract subclass of C{ParseExpression}, for converting parsed results. 
Fcstt|j|d|_dS)NF)rr-rrW)rr.rg)rHrwrxr]szTokenConverter.__init__)F)rrrrrrrwrw)rHrxr-Yscs6eZdZdZd fdd ZfddZdd ZZS) r a Converter to concatenate all matching tokens to a single string. By default, the matching patterns must also be contiguous in the input string; this can be disabled by specifying C{'adjacent=False'} in the constructor. Example:: real = Word(nums) + '.' + Word(nums) print(real.parseString('3.1416')) # -> ['3', '.', '1416'] # will also erroneously match the following print(real.parseString('3. 1416')) # -> ['3', '.', '1416'] real = Combine(Word(nums) + '.' + Word(nums)) print(real.parseString('3.1416')) # -> ['3.1416'] # no match when there are internal spaces print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) rTcs8tt|j||r|j||_d|_||_d|_dS)NT)rr rradjacentrX joinStringre)rr.rXrW)rHrwrxrrszCombine.__init__cs(|jrtj||ntt|j||S)N)rWr$rrr )rr)rHrwrxr|szCombine.ignorecCsP|j}|dd=|tdj|j|jg|jd7}|jrH|jrH|gS|SdS)Nr)r)rr"rr rXrbrVr)rr-rrZretToksrwrwrxrs  "zCombine.postParse)rT)rrrrrrrrrwrw)rHrxr as cs(eZdZdZfddZddZZS)ra Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions. Example:: ident = Word(alphas) num = Word(nums) term = ident | num func = ident + Optional(delimitedList(term)) print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100'] func = ident + Group(Optional(delimitedList(term))) print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']] cstt|j|d|_dS)NT)rrrrW)rr.)rHrwrxrszGroup.__init__cCs|gS)Nrw)rr-rrrwrwrxrszGroup.postParse)rrrrrrrrwrw)rHrxrs  cs(eZdZdZfddZddZZS)r aW Converter to return a repetitive expression as a list, but also as a dictionary. Each element can also be referenced using the first token in the expression as its key. Useful for tabular report scraping when the first column can be used as a item key. 
Example:: data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) text = "shape: SQUARE posn: upper left color: light blue texture: burlap" attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) # print attributes as plain groups print(OneOrMore(attr_expr).parseString(text).dump()) # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names result = Dict(OneOrMore(Group(attr_expr))).parseString(text) print(result.dump()) # access named fields as dict entries, or output as dict print(result['shape']) print(result.asDict()) prints:: ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - color: light blue - posn: upper left - shape: SQUARE - texture: burlap SQUARE {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} See more examples at L{ParseResults} of accessing fields by results name. cstt|j|d|_dS)NT)rr rrW)rr.)rHrwrxrsz Dict.__init__cCsxt|D]\}}t|dkr q |d}t|trBt|dj}t|dkr^td|||<q t|dkrt|dt rt|d|||<q |j}|d=t|dkst|tr|j rt||||<q t|d|||<q W|j r|gS|SdS)Nrrrrrq) rrrzrurrrr"rrrV)rr-rrrtokZikeyZ dictvaluerwrwrxrs$   zDict.postParse)rrrrrrrrwrw)rHrxr s# c@s eZdZdZddZddZdS)r+aV Converter for ignoring the results of a parsed expression. Example:: source = "a, b, c,d" wd = Word(alphas) wd_list1 = wd + ZeroOrMore(',' + wd) print(wd_list1.parseString(source)) # often, delimiters that are useful during parsing are just in the # way afterward - use Suppress to keep them out of the parsed output wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) print(wd_list2.parseString(source)) prints:: ['a', ',', 'b', ',', 'c', ',', 'd'] ['a', 'b', 'c', 'd'] (See also L{delimitedList}.) 
cCsgS)Nrw)rr-rrrwrwrxrszSuppress.postParsecCs|S)Nrw)rrwrwrxrszSuppress.suppressN)rrrrrrrwrwrwrxr+sc@s(eZdZdZddZddZddZdS) rzI Wrapper for parse actions, to ensure they are only called once. cCst||_d|_dS)NF)rMcallablecalled)rZ methodCallrwrwrxrs zOnlyOnce.__init__cCs.|js|j|||}d|_|St||ddS)NTr)r[rZr)rrr5rvrrwrwrxrs zOnlyOnce.__call__cCs d|_dS)NF)r[)rrwrwrxreset szOnlyOnce.resetN)rrrrrrr\rwrwrwrxrsc s:tfdd}y j|_Wntk r4YnX|S)as Decorator for debugging parse actions. When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised. Example:: wd = Word(alphas) @traceParseAction def remove_duplicate_chars(tokens): return ''.join(sorted(set(''.join(tokens))) wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) print(wds.parseString("slkdjs sld sldd sdlf sdljf")) prints:: >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) <>entering %s(line: '%s', %d, %r) z<.z)rMrr)rr`rw)rrxrb s  ,FcCs`t|dt|dt|d}|rBt|t||j|S|tt||j|SdS)a Helper to define a delimited list of expressions - the delimiter defaults to ','. By default, the list elements and delimiters can have intervening whitespace, and comments, but this can be overridden by passing C{combine=True} in the constructor. If C{combine} is set to C{True}, the matching tokens are returned as a single token string, with the delimiters included; otherwise, the matching tokens are returned as a list of tokens, with the delimiters suppressed. 
Example:: delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] z [rz]...N)rr r2rir+)r.ZdelimcombineZdlNamerwrwrxr@9s $csjtfdd}|dkr0ttjdd}n|j}|jd|j|dd|jd td S) a: Helper to define a counted list of expressions. This helper defines a pattern of the form:: integer expr expr expr... where the leading integer tells how many expr expressions follow. The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value. Example:: countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] # in this parser, the leading integer value is given in binary, # '10' indicating that 2 values are in the array binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] cs.|d}|r ttg|p&tt>gS)Nr)rrrC)rr5rvr) arrayExprr.rwrxcountFieldParseAction_s"z+countedArray..countFieldParseActionNcSs t|dS)Nr)ru)rvrwrwrxrydszcountedArray..ZarrayLenT)rfz(len) z...)rr/rRrrrirxr)r.ZintExprrdrw)rcr.rxr<Ls cCs:g}x0|D](}t|tr(|jt|q |j|q W|S)N)rzrrrr)Lrrrwrwrxrks   rcs6tfdd}|j|ddjdt|S)a* Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousLiteral(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches a previous literal, will also match the leading C{"1:1"} in C{"1:10"}. If this is not desired, use C{matchPreviousExpr}. Do I{not} use with packrat parsing enabled. 
csP|rBt|dkr|d>qLt|j}tdd|D>n t>dS)Nrrrcss|]}t|VqdS)N)r)rttrwrwrxrszDmatchPreviousLiteral..copyTokenToRepeater..)rrrrr )rr5rvZtflat)reprwrxcopyTokenToRepeaters   z1matchPreviousLiteral..copyTokenToRepeaterT)rfz(prev) )rrxrir)r.rhrw)rgrxrOts  csFt|j}|Kfdd}|j|ddjdt|S)aS Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousExpr(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches by expressions, will I{not} match the leading C{"1:1"} in C{"1:10"}; the expressions are evaluated first, and then compared, so C{"1"} is compared with C{"10"}. Do I{not} use with packrat parsing enabled. cs*t|jfdd}j|dddS)Ncs$t|j}|kr tddddS)Nrr)rrr)rr5rvZ theseTokens) matchTokensrwrxmustMatchTheseTokenss zLmatchPreviousExpr..copyTokenToRepeater..mustMatchTheseTokensT)rf)rrr)rr5rvrj)rg)rirxrhs  z.matchPreviousExpr..copyTokenToRepeaterT)rfz(prev) )rrrxrir)r.Ze2rhrw)rgrxrNs cCs>xdD]}|j|t|}qW|jdd}|jdd}t|S)Nz\^-]rz\nr(z\t)r_bslashr)rrrwrwrxrs    rTc s|rdd}dd}tndd}dd}tg}t|trF|j}n&t|tjr\t|}ntj dt dd|svt Sd }x|t |d kr||}xnt ||d d D]N\}} || |r|||d =Pq||| r|||d =|j|| | }PqW|d 7}q|W| r|ryht |t d j|krZtd d jdd|Djdj|Stdjdd|Djdj|SWn&tk rtj dt ddYnXtfdd|Djdj|S)a Helper to quickly define a set of alternative Literals, and makes sure to do longest-first testing when there is a conflict, regardless of the input order, but returns a C{L{MatchFirst}} for best performance. 
Parameters: - strs - a string of space-delimited literals, or a collection of string literals - caseless - (default=C{False}) - treat all literals as caseless - useRegex - (default=C{True}) - as an optimization, will generate a Regex object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or if creating a C{Regex} raises an exception) Example:: comp_oper = oneOf("< = > <= >= !=") var = Word(alphas) number = Word(nums) term = var | number comparison_expr = term + comp_oper + term print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) prints:: [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] cSs|j|jkS)N)r)rbrwrwrxryszoneOf..cSs|jj|jS)N)rr)rrlrwrwrxryscSs||kS)Nrw)rrlrwrwrxryscSs |j|S)N)r)rrlrwrwrxrysz6Invalid argument to oneOf, expected string or iterablerq)rrrrNrz[%s]css|]}t|VqdS)N)r)rsymrwrwrxrszoneOf..z | |css|]}tj|VqdS)N)rdr )rrmrwrwrxrsz7Exception creating Regex for oneOf, building MatchFirstc3s|]}|VqdS)Nrw)rrm)parseElementClassrwrxrs)rrrzrrrr5rrrrrrrrrr'rirKr) ZstrsrZuseRegexZisequalZmasksZsymbolsrZcurrrrw)rorxrSsL         ((cCsttt||S)a Helper to easily and clearly define a dictionary by specifying the respective patterns for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens in the proper order. The key pattern can include delimiting markers or punctuation, as long as they are suppressed, thereby leaving the significant key text. The value pattern can include named results, so that the C{Dict} results can include named token fields. 
Example:: text = "shape: SQUARE posn: upper left color: light blue texture: burlap" attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) print(OneOrMore(attr_expr).parseString(text).dump()) attr_label = label attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) # similar to Dict, but simpler call format result = dictOf(attr_label, attr_value).parseString(text) print(result.dump()) print(result['shape']) print(result.shape) # object attribute access works too print(result.asDict()) prints:: [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - color: light blue - posn: upper left - shape: SQUARE - texture: burlap SQUARE SQUARE {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} )r r2r)rrrwrwrxrAs!cCs^tjdd}|j}d|_|d||d}|r@dd}ndd}|j||j|_|S) a Helper to return the original, untokenized text for a given expression. Useful to restore the parsed fields of an HTML start tag into the raw tag text itself, or to revert separate tokens with intervening whitespace back to the original matching input text. By default, returns astring containing the original parsed text. If the optional C{asString} argument is passed as C{False}, then the return value is a C{L{ParseResults}} containing any results names that were originally matched, and a single token containing the original matched text from the input string. So if the expression passed to C{L{originalTextFor}} contains expressions with defined results names, you must set C{asString} to C{False} if you want to preserve those results name values. 
Example:: src = "this is test bold text normal text " for tag in ("b","i"): opener,closer = makeHTMLTags(tag) patt = originalTextFor(opener + SkipTo(closer) + closer) print(patt.searchString(src)[0]) prints:: [' bold text '] ['text'] cSs|S)Nrw)rrrvrwrwrxry8sz!originalTextFor..F_original_start _original_endcSs||j|jS)N)rprq)rr5rvrwrwrxry=scSs&||jd|jdg|dd<dS)Nrprq)r)rr5rvrwrwrx extractText?sz$originalTextFor..extractText)r rrrer])r.ZasStringZ locMarkerZ endlocMarker matchExprrrrwrwrxrg s  cCst|jddS)zp Helper to undo pyparsing's default grouping of And expressions, even if all but one are non-empty. cSs|dS)Nrrw)rvrwrwrxryJszungroup..)r-r)r.rwrwrxrhEscCs4tjdd}t|d|d|jjdS)a Helper to decorate a returned token with its starting and ending locations in the input string. This helper adds the following results names: - locn_start = location where matched expression begins - locn_end = location where matched expression ends - value = the actual parsed results Be careful if the input text contains C{} characters, you may want to call C{L{ParserElement.parseWithTabs}} Example:: wd = Word(alphas) for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): print(match) prints:: [[0, 'ljsdf', 5]] [[8, 'lksdjjf', 15]] [[18, 'lkkjj', 23]] cSs|S)Nrw)rr5rvrwrwrxry`szlocatedExpr..Z locn_startrZlocn_end)r rrrr)r.ZlocatorrwrwrxrjLsz\[]-*.$+^?()~ )r cCs |ddS)Nrrrrw)rr5rvrwrwrxryksryz\\0?[xX][0-9a-fA-F]+cCstt|djddS)Nrz\0x)unichrrulstrip)rr5rvrwrwrxrylsz \\0[0-7]+cCstt|ddddS)Nrrr)ruru)rr5rvrwrwrxrymsz\])rr z\wr8rrZnegatebodyr c sBddy djfddtj|jDStk r<dSXdS)a Helper to easily define string ranges for use in Word construction. Borrows syntax from regexp '[]' string range definitions:: srange("[0-9]") -> "0123456789" srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" The input string must be enclosed in []'s, and the returned string is the expanded character set joined into a single string. 
The values enclosed in the []'s may be: - a single character - an escaped character with a leading backslash (such as C{\-} or C{\]}) - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) (C{\0x##} is also supported for backwards compatibility) - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character) - a range of any of the above, separated by a dash (C{'a-z'}, etc.) - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.) cSs<t|ts|Sdjddtt|dt|ddDS)Nrcss|]}t|VqdS)N)ru)rrrwrwrxrsz+srange....rrr)rzr"rrord)prwrwrxryszsrange..rc3s|]}|VqdS)Nrw)rpart) _expandedrwrxrszsrange..N)r_reBracketExprrrxrK)rrw)r|rxr_rs  csfdd}|S)zt Helper method for defining parse actions that require matching at a specific column in the input text. cs"t||krt||ddS)Nzmatched token not at column %d)r9r)r)Zlocnr1)rrwrx verifyColsz!matchOnlyAtCol..verifyColrw)rr~rw)rrxrMs cs fddS)a Helper method for common parse actions that simply return a literal value. Especially useful when used with C{L{transformString}()}. Example:: num = Word(nums).setParseAction(lambda toks: int(toks[0])) na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) term = na | num OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] csgS)Nrw)rr5rv)replStrrwrxryszreplaceWith..rw)rrw)rrxr\s cCs|dddS)a Helper parse action for removing quotation marks from parsed quoted strings. 
Example:: # by default, quotation marks are included in parsed results quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] # use removeQuotes to strip quotation marks from parsed results quotedString.setParseAction(removeQuotes) quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] rrrrsrw)rr5rvrwrwrxrZs c sNfdd}ytdtdj}Wntk rBt}YnX||_|S)aG Helper to define a parse action by mapping a function to all elements of a ParseResults list.If any additional args are passed, they are forwarded to the given function as additional arguments after the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the parsed data to an integer using base 16. Example (compare the last to example in L{ParserElement.transformString}:: hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) hex_ints.runTests(''' 00 11 22 aa FF 0a 0d 1a ''') upperword = Word(alphas).setParseAction(tokenMap(str.upper)) OneOrMore(upperword).runTests(''' my kingdom for a horse ''') wd = Word(alphas).setParseAction(tokenMap(str.title)) OneOrMore(wd).setParseAction(' '.join).runTests(''' now is the winter of our discontent made glorious summer by this sun of york ''') prints:: 00 11 22 aa FF 0a 0d 1a [0, 17, 34, 170, 255, 10, 13, 26] my kingdom for a horse ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] now is the winter of our discontent made glorious summer by this sun of york ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] csfdd|DS)Ncsg|]}|fqSrwrw)rZtokn)rr6rwrxrsz(tokenMap..pa..rw)rr5rv)rr6rwrxr}sztokenMap..parrH)rJrrKr{)r6rr}rLrw)rr6rxrms cCs t|jS)N)rr)rvrwrwrxryscCs t|jS)N)rlower)rvrwrwrxryscCst|tr|}t|| d}n|j}tttd}|rtjj t }t d|dt t t|t d|tddgdjd j d d t d }nd jddtD}tjj t t|B}t d|dt t t|j ttt d|tddgdjd j dd t d }ttd|d }|jdd j|jddjjjd|}|jdd j|jddjjjd|}||_||_||fS)zRInternal helper to construct opening and closing 
tag expressions, given a tag name)rz_-:rtag=/F)rrCcSs |ddkS)Nrrrw)rr5rvrwrwrxrysz_makeTags..rrcss|]}|dkr|VqdS)rNrw)rrrwrwrxrsz_makeTags..cSs |ddkS)Nrrrw)rr5rvrwrwrxryszrz)rzrrrr/r4r3r>rrrZr+r r2rrrmrrVrYrBr _Lrtitlerrir)tagStrZxmlZresnameZ tagAttrNameZ tagAttrValueZopenTagZprintablesLessRAbrackZcloseTagrwrwrx _makeTagss" T\..rcCs t|dS)a  Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values. Example:: text = 'More info at the pyparsing wiki page' # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple a,a_end = makeHTMLTags("A") link_expr = a + SkipTo(a_end)("link_text") + a_end for link in link_expr.searchString(text): # attributes in the tag (like "href" shown here) are also accessible as named results print(link.link_text, '->', link.href) prints:: pyparsing -> http://pyparsing.wikispaces.com F)r)rrwrwrxrKscCs t|dS)z Helper to construct opening and closing tag expressions for XML, given a tag name. Matches tags only in the given upper/lower case. Example: similar to L{makeHTMLTags} T)r)rrwrwrxrLscs8|r|ddn|jddDfdd}|S)a< Helper to create a validating parse action to be used with start tags created with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag with a required attribute value, to avoid false matches on common tags such as C{} or C{
}. Call C{withAttribute} with a series of attribute names and values. Specify the list of filter attributes names and values as: - keyword arguments, as in C{(align="right")}, or - as an explicit dict with C{**} operator, when an attribute name is also a Python reserved word, as in C{**{"class":"Customer", "align":"right"}} - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) For attribute names with a namespace prefix, you must use the second form. Attribute names are matched insensitive to upper/lower case. If just testing for C{class} (with or without a namespace), use C{L{withClass}}. To verify that the attribute exists, but without specifying a value, pass C{withAttribute.ANY_VALUE} as the value. Example:: html = '''
Some text
1 4 0 1 0
1,3 2,3 1,1
this has no type
''' div,div_end = makeHTMLTags("div") # only match div tag having a type attribute with value "grid" div_grid = div().setParseAction(withAttribute(type="grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) # construct a match with any div tag having a type attribute, regardless of the value div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 NcSsg|]\}}||fqSrwrw)rrrrwrwrxrQsz!withAttribute..cs^xXD]P\}}||kr&t||d||tjkr|||krt||d||||fqWdS)Nzno matching attribute z+attribute '%s' has value '%s', must be '%s')rre ANY_VALUE)rr5rZattrNameZ attrValue)attrsrwrxr}Rs zwithAttribute..pa)r)rZattrDictr}rw)rrxres 2 cCs|r d|nd}tf||iS)a Simplified version of C{L{withAttribute}} when matching on a div class - made difficult because C{class} is a reserved word in Python. Example:: html = '''
Some text
1 4 0 1 0
1,3 2,3 1,1
this <div> has no class
''' div,div_end = makeHTMLTags("div") div_grid = div().setParseAction(withClass("grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 z%s:classclass)re)Z classname namespaceZ classattrrwrwrxrk\s (rcCst}||||B}x`t|D]R\}}|d dd\}} } } | dkrTd|nd|} | dkr|dksxt|dkrtd|\} }tj| }| tjkrd| dkrt||t|t |}n| dkr|dk rt|||t|t ||}nt||t|t |}nD| dkrZt|| |||t|| |||}ntd n| tj krH| dkrt |t st |}t|j |t||}n| dkr|dk rt|||t|t ||}nt||t|t |}nD| dkr>t|| |||t|| |||}ntd ntd | r`|j| ||j| |BK}|}q"W||K}|S) a Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. Parse actions can also be attached to operator expressions. The generated parser will also recognize the use of parentheses to override operator precedences (see example below). Note: if you define a deep operator list, you may see performance issues when using infixNotation. See L{ParserElement.enablePackrat} for a mechanism to potentially improve your parser performance. 
Parameters: - baseExpr - expression representing the most basic element for the nested - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal; if numTerms is 3, opExpr is a tuple of two expressions, for the two operators separating the 3 terms - numTerms is the number of terms for this operator (must be 1, 2, or 3) - rightLeftAssoc is the indicator whether the operator is right or left associative, using the pyparsing-defined constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. - parseAction is the parse action to be associated with expressions matching this operator expression (the parse action tuple member may be omitted) - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) Example:: # simple example of four-function arithmetic with ints and variable names integer = pyparsing_common.signed_integer varname = pyparsing_common.identifier arith_expr = infixNotation(integer | varname, [ ('-', 1, opAssoc.RIGHT), (oneOf('* /'), 2, opAssoc.LEFT), (oneOf('+ -'), 2, opAssoc.LEFT), ]) arith_expr.runTests(''' 5+3*6 (5+3)*6 -2--11 ''', fullDump=False) prints:: 5+3*6 [[5, '+', [3, '*', 6]]] (5+3)*6 [[[5, '+', 3], '*', 6]] -2--11 [[['-', 2], '-', ['-', 11]]] Nrroz%s termz %s%s termrqz@if numterms=3, opExpr must be a tuple or list of two expressionsrrz6operator must be unary (1), binary (2), or ternary (3)z2operator must indicate right or left associativity)N)rrrrrirTLEFTrrrRIGHTrzrr.r)ZbaseExprZopListZlparZrparrZlastExprrZoperDefZopExprZarityZrightLeftAssocr}ZtermNameZopExpr1ZopExpr2ZthisExprrsrwrwrxrisR;    &       &   z4"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*"z string enclosed in double 
quotesz4'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*'z string enclosed in single quotesz*quotedString using single or double quotesuzunicode string literalcCs||krtd|dkr(t|to,t|tr t|dkrt|dkr|dk rtt|t||tjddj dd}n$t j t||tjj dd}nx|dk rtt|t |t |ttjddj dd}n4ttt |t |ttjddj d d}ntd t }|dk rb|tt|t||B|Bt|K}n$|tt|t||Bt|K}|jd ||f|S) a~ Helper method for defining nested lists enclosed in opening and closing delimiters ("(" and ")" are the default). Parameters: - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression - content - expression for items within the nested lists (default=C{None}) - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) If an expression is not provided for the content argument, the nested expression will capture all whitespace-delimited content between delimiters as a list of separate values. Use the C{ignoreExpr} argument to define expressions that may contain opening or closing characters that should not be treated as opening or closing characters for nesting, such as quotedString or a comment expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. The default is L{quotedString}, but if no expressions are to be ignored, then pass C{None} for this argument. 
Example:: data_type = oneOf("void int short long char float double") decl_data_type = Combine(data_type + Optional(Word('*'))) ident = Word(alphas+'_', alphanums+'_') number = pyparsing_common.number arg = Group(decl_data_type + ident) LPAR,RPAR = map(Suppress, "()") code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) c_function = (decl_data_type("type") + ident("name") + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + code_body("body")) c_function.ignore(cStyleComment) source_code = ''' int is_odd(int x) { return (x%2); } int dec_to_hex(char hchar) { if (hchar >= '0' && hchar <= '9') { return (ord(hchar)-ord('0')); } else { return (10+ord(hchar)-ord('A')); } } ''' for func in c_function.searchString(source_code): print("%(name)s (%(type)s) args: %(args)s" % func) prints:: is_odd (int) args: [['int', 'x']] dec_to_hex (int) args: [['char', 'hchar']] z.opening and closing strings cannot be the sameNrr)r cSs |djS)Nr)r)rvrwrwrxry9sznestedExpr..cSs |djS)Nr)r)rvrwrwrxry<scSs |djS)Nr)r)rvrwrwrxryBscSs |djS)Nr)r)rvrwrwrxryFszOopening and closing arguments must be strings if no content expression is givenznested %s%s expression)rrzrrr rr r$rNrrCrrrrr+r2ri)openerZcloserZcontentrOrrwrwrxrPs4:     *$c sfdd}fdd}fdd}ttjdj}ttj|jd}tj|jd }tj|jd } |rtt||t|t|t|| } n$tt|t|t|t|} |j t t| jd S) a Helper method for defining space-delimited indentation blocks, such as those used to define block statements in Python source code. Parameters: - blockStatementExpr - expression defining syntax of statement that is repeated within the indented block - indentStack - list created by caller to manage indentation stack (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - indent - boolean indicating whether block must be indented beyond the the current level; set to False for block of left-most statements (default=C{True}) A valid block must contain at least one C{blockStatement}. 
Example:: data = ''' def A(z): A1 B = 100 G = A2 A2 A3 B def BB(a,b,c): BB1 def BBA(): bba1 bba2 bba3 C D def spam(x,y): def eggs(z): pass ''' indentStack = [1] stmt = Forward() identifier = Word(alphas, alphanums) funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") func_body = indentedBlock(stmt, indentStack) funcDef = Group( funcDecl + func_body ) rvalue = Forward() funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") rvalue << (funcCall | identifier | Word(nums)) assignment = Group(identifier + "=" + rvalue) stmt << ( funcDef | assignment | identifier ) module_body = OneOrMore(stmt) parseTree = module_body.parseString(data) parseTree.pprint() prints:: [['def', 'A', ['(', 'z', ')'], ':', [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], 'B', ['def', 'BB', ['(', 'a', 'b', 'c', ')'], ':', [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], 'C', 'D', ['def', 'spam', ['(', 'x', 'y', ')'], ':', [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] csN|t|krdSt||}|dkrJ|dkr>t||dt||ddS)Nrrzillegal nestingznot a peer entryrsrs)rr9r!r)rr5rvcurCol) indentStackrwrxcheckPeerIndents     z&indentedBlock..checkPeerIndentcs2t||}|dkr"j|n t||ddS)Nrrznot a subentryrs)r9rr)rr5rvr)rrwrxcheckSubIndents   z%indentedBlock..checkSubIndentcsN|t|krdSt||}o4|dko4|dksBt||djdS)Nrrrqznot an unindentrsr:)rr9rr)rr5rvr)rrwrx checkUnindents    z$indentedBlock..checkUnindentz INDENTrZUNINDENTzindented block) rrrrr rrirrrrk) ZblockStatementExprrrrrrr!rZPEERZUNDENTZsmExprrw)rrxrfQsN   ,z#[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]z[\0xa1-\0xbf\0xd7\0xf7]z_:zany tagzgt lt amp nbsp quot aposz><& "'z &(?Prnz);zcommon HTML entitycCs tj|jS)zRHelper parser action to replace common HTML entities with their special characters)_htmlEntityMaprZentity)rvrwrwrxr[sz/\*(?:[^*]|\*(?!/))*z*/zC style commentzz HTML commentz.*z rest of linez//(?:\\\n|[^\n])*z // commentzC++ style commentz#.*zPython style comment)rz 
commaItem)rc@seZdZdZeeZeeZe e j dj eZ e ej dj eedZedj dj eZej edej ej dZejd d eeeed jeBj d Zejeed j dj eZedj dj eZeeBeBjZedj dj eZe ededj dZedj dZ edj dZ!e!de!dj dZ"ee!de!d>dee!de!d?j dZ#e#j$d d d!e j d"Z%e&e"e%Be#Bj d#j d#Z'ed$j d%Z(e)d@d'd(Z*e)dAd*d+Z+ed,j d-Z,ed.j d/Z-ed0j d1Z.e/je0jBZ1e)d2d3Z2e&e3e4d4e5e e6d4d5ee7d6jj d7Z8e9ee:j;e8Bd8d9j d:Zd=S)Brna Here are some common low-level expressions that may be useful in jump-starting parser development: - numeric forms (L{integers}, L{reals}, L{scientific notation}) - common L{programming identifiers} - network addresses (L{MAC}, L{IPv4}, L{IPv6}) - ISO8601 L{dates} and L{datetime} - L{UUID} - L{comma-separated list} Parse actions: - C{L{convertToInteger}} - C{L{convertToFloat}} - C{L{convertToDate}} - C{L{convertToDatetime}} - C{L{stripHTMLTags}} - C{L{upcaseTokens}} - C{L{downcaseTokens}} Example:: pyparsing_common.number.runTests(''' # any int or real number, returned as the appropriate type 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.fnumber.runTests(''' # any int or real number, returned as float 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.hex_integer.runTests(''' # hex numbers 100 FF ''') pyparsing_common.fraction.runTests(''' # fractions 1/2 -3/4 ''') pyparsing_common.mixed_integer.runTests(''' # mixed fractions 1 1/2 -3/4 1-3/4 ''') import uuid pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) pyparsing_common.uuid.runTests(''' # uuid 12345678-1234-5678-1234-567812345678 ''') prints:: # any int or real number, returned as the appropriate type 100 [100] -100 [-100] +100 [100] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # any int or real number, returned as float 100 [100.0] -100 [-100.0] +100 [100.0] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # hex numbers 100 [256] FF [255] # fractions 1/2 [0.5] -3/4 [-0.75] # mixed fractions 1 [1] 1/2 [0.5] -3/4 [-0.75] 1-3/4 [1.75] # uuid 12345678-1234-5678-1234-567812345678 [UUID('12345678-1234-5678-1234-567812345678')] 
integerz hex integerrtz[+-]?\d+zsigned integerrfractioncCs|d|dS)Nrrrrsrw)rvrwrwrxryszpyparsing_common.r8z"fraction or mixed integer-fractionz [+-]?\d+\.\d*z real numberz+[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)z$real number with scientific notationz[+-]?\d+\.?\d*([eE][+-]?\d+)?fnumberrB identifierzK(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}z IPv4 addressz[0-9a-fA-F]{1,4} hex_integerrzfull IPv6 addressrrBz::zshort IPv6 addresscCstdd|DdkS)Ncss|]}tjj|rdVqdS)rrN)rn _ipv6_partr)rrfrwrwrxrsz,pyparsing_common...rw)rH)rvrwrwrxrysz::ffff:zmixed IPv6 addressz IPv6 addressz:[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}z MAC address%Y-%m-%dcsfdd}|S)a Helper to create a parse action for converting parsed date string to Python datetime.date Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) Example:: date_expr = pyparsing_common.iso8601_date.copy() date_expr.setParseAction(pyparsing_common.convertToDate()) print(date_expr.parseString("1999-12-31")) prints:: [datetime.date(1999, 12, 31)] csLytj|djStk rF}zt||t|WYdd}~XnXdS)Nr)rstrptimeZdaterrr{)rr5rvve)fmtrwrxcvt_fnsz.pyparsing_common.convertToDate..cvt_fnrw)rrrw)rrx convertToDates zpyparsing_common.convertToDate%Y-%m-%dT%H:%M:%S.%fcsfdd}|S)a Helper to create a parse action for converting parsed datetime string to Python datetime.datetime Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) Example:: dt_expr = pyparsing_common.iso8601_datetime.copy() dt_expr.setParseAction(pyparsing_common.convertToDatetime()) print(dt_expr.parseString("1999-12-31T23:59:59.999")) prints:: [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] csHytj|dStk rB}zt||t|WYdd}~XnXdS)Nr)rrrrr{)rr5rvr)rrwrxrsz2pyparsing_common.convertToDatetime..cvt_fnrw)rrrw)rrxconvertToDatetimes z"pyparsing_common.convertToDatetimez7(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?z ISO8601 datez(?P\d{4})-(?P\d\d)-(?P\d\d)[T 
](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?zISO8601 datetimez2[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}UUIDcCstjj|dS)a Parse action to remove HTML tags from web page HTML source Example:: # strip HTML links from normal text text = 'More info at the
pyparsing wiki page' td,td_end = makeHTMLTags("TD") table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' r)rn_html_stripperr)rr5rrwrwrx stripHTMLTagss zpyparsing_common.stripHTMLTagsra)rz rr)rzcomma separated listcCs t|jS)N)rr)rvrwrwrxryscCs t|jS)N)rr)rvrwrwrxrysN)rrB)rrB)r)r)?rrrrrmruZconvertToIntegerfloatZconvertToFloatr/rRrirrrDrr'Zsigned_integerrrxrrZ mixed_integerrHrealZsci_realrnumberrr4r3rZ ipv4_addressrZ_full_ipv6_addressZ_short_ipv6_addressr~Z_mixed_ipv6_addressr Z ipv6_addressZ mac_addressrrrZ iso8601_dateZiso8601_datetimeuuidr7r6rrrrrrVr. _commasepitemr@rYrZcomma_separated_listrdrBrwrwrwrxrnsN"" 2   8__main__Zselectfromz_$r])rbcolumnsrjZtablesZcommandaK # '*' as column list and dotted table name select * from SYS.XYZZY # caseless match on "SELECT", and casts back to "select" SELECT * from XYZZY, ABC # list of column names, and mixed case SELECT keyword Select AA,BB,CC from Sys.dual # multiple tables Select A, B, C from Sys.dual, Table2 # invalid SELECT keyword - should fail Xelect A, B, C from Sys.dual # incomplete command - should fail Select # invalid column name - should fail Select ^^^ frox Sys.dual z] 100 -100 +100 3.14159 6.02e23 1e-12 z 100 FF z6 12345678-1234-5678-1234-567812345678 )rq)raF)N)FT)T)r)T)r __version__Z__versionTime__ __author__rweakrefrrrr~rrdrrr"r<rr_threadr ImportErrorZ threadingrrZ ordereddict__all__r version_infor;rmaxsizerr{rchrrurrHrrreversedrrrr6r r rIZmaxintZxrangerZ __builtin__rZfnamerrJrrrrrrZascii_uppercaseZascii_lowercaser4rRrDr3rkrZ printablerVrKrrr!r#r&rr"MutableMappingregisterr9rJrGr/r2r4rQrMr$r,r rrrrQrrrrlr/r'r%r r.r0rrrr*r)r1r0r rrrr rrrrJrr2rMrNrr(rrVr-r rr r+rrbr@r<rrOrNrrSrArgrhrjrirCrIrHrar`rZ _escapedPuncZ_escapedHexCharZ_escapedOctCharUNICODEZ _singleCharZ _charRangermr}r_rMr\rZrmrdrBrrKrLrerrkrTrrrirUr>r^rYrcrPrfr5rWr7r6rrrrr;r[r8rErr]r?r=rFrXrrr:rnrZ selectTokenZ fromTokenZidentZ 
columnNameZcolumnNameListZ columnSpecZ tableNameZ tableNameListZ simpleSQLrrrrrrrwrwrwrx=s                 8      @d &A= I G3pLOD|M &#@sQ,A,    I# %     &0 ,   ? #k Zr   (  0     "PKtge[0 KK$__pycache__/pyparsing.cpython-36.pycnu[3 vh@s dZdZdZdZddlZddlmZddlZddl Z ddl Z ddl Z ddl Z ddl Z ddlZddlZddlZddlmZyddlmZWn ek rddlmZYnXydd l mZWn>ek rydd lmZWnek rdZYnXYnXd d d d ddddddddddddddddddd d!d"d#d$d%d&d'd(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7d8d9d:d;dd?d@dAdBdCdDdEdFdGdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcdddedfdgdhdidjdkdldmdndodpdqdrgiZee jddsZeddskZer"e jZe Z!e"Z#e Z$e%e&e'e(e)ee*e+e,e-e.g Z/nbe j0Ze1Z2dtduZ$gZ/ddl3Z3xBdvj4D]6Z5ye/j6e7e3e5Wne8k r|wJYnXqJWe9dwdxe2dyDZ:dzd{Z;Gd|d}d}e<Z=ej>ej?Z@d~ZAeAdZBe@eAZCe"dZDdjEddxejFDZGGdd!d!eHZIGdd#d#eIZJGdd%d%eIZKGdd'd'eKZLGdd*d*eHZMGddde<ZNGdd&d&e<ZOe jPjQeOdd=ZRddNZSddKZTddZUddZVddZWddUZXd/ddZYGdd(d(e<ZZGdd0d0eZZ[Gddde[Z\Gddde[Z]Gddde[Z^e^Z_e^eZ_`Gddde[ZaGdd d e^ZbGdd d eaZcGddpdpe[ZdGdd3d3e[ZeGdd+d+e[ZfGdd)d)e[ZgGdd d e[ZhGdd2d2e[ZiGddde[ZjGdddejZkGdddejZlGdddejZmGdd.d.ejZnGdd-d-ejZoGdd5d5ejZpGdd4d4ejZqGdd$d$eZZrGdd d erZsGdd d erZtGddderZuGddderZvGdd"d"eZZwGdddewZxGdddewZyGdddewZzGdddezZ{Gdd6d6ezZ|Gddde<Z}e}Z~GdddewZGdd,d,ewZGdddewZGdddeZGdd1d1ewZGdddeZGdddeZGdddeZGdd/d/eZGddde<ZddfZd0ddDZd1dd@Zdd΄ZddSZddRZdd҄Zd2ddWZddEZd3ddkZddlZddnZe\jdGZeljdMZemjdLZenjdeZeojddZeeeDdddڍjdd܄Zefd݃jdd܄Zefd߃jdd܄ZeeBeBeeeGddydBefde jBZeeedeZe^dedjdee{eeBjddZddcZddQZdd`Zdd^ZddqZedd܄Zedd܄ZddZddOZddPZddiZe<e_d4ddoZe=Ze<e_e<e_ededfddmZeZeefddjdZeefddjdZeefddefddBjdZee_dejjdZdddejfddTZd5ddjZedZedZeeee@eCdjd\ZZeed j4d Zefd d jEejÃd jdZĐdd_ZeefddjdZefdjdZefdjȃjdZefdjdZeefddeBjdZeZefdjdZee{eeeGdɐdeeede^dɃemj΃jdZeeejeBddjd>ZGd drdrZeҐd!k rebd"Zebd#Zeee@eCd$ZeeՐd%dӐd&jeZeeeփjd'Zאd(eBZeeՐd%dӐd&jeZeeeكjd)ZeӐd*eؐd'eeڐd)Zejܐd+ejjܐd,ejjܐd,ejjܐd-ddlZejjeejejjܐd.dS(6aS pyparsing module - Classes and methods to define and execute parsing grammars The pyparsing module is an alternative approach to creating and executing simple grammars, vs. 
the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you don't need to learn a new syntax for defining grammars or matching expressions - the parsing module provides a library of classes that you use to construct the grammar directly in Python. Here is a program to parse "Hello, World!" (or any greeting of the form C{", !"}), built up using L{Word}, L{Literal}, and L{And} elements (L{'+'} operator gives L{And} expressions, strings are auto-converted to L{Literal} expressions):: from pyparsing import Word, alphas # define grammar of a greeting greet = Word(alphas) + "," + Word(alphas) + "!" hello = "Hello, World!" print (hello, "->", greet.parseString(hello)) The program outputs the following:: Hello, World! -> ['Hello', ',', 'World', '!'] The Python representation of the grammar is quite readable, owing to the self-explanatory class names, and the use of '+', '|' and '^' operators. The L{ParseResults} object returned from L{ParserElement.parseString} can be accessed as a nested list, a dictionary, or an object with named attributes. The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) 
- quoted strings - embedded comments z2.1.10z07 Oct 2016 01:31 UTCz*Paul McGuire N)ref)datetime)RLock) OrderedDictAndCaselessKeywordCaselessLiteral CharsNotInCombineDictEachEmpty FollowedByForward GoToColumnGroupKeywordLineEnd LineStartLiteral MatchFirstNoMatchNotAny OneOrMoreOnlyOnceOptionalOrParseBaseExceptionParseElementEnhanceParseExceptionParseExpressionParseFatalException ParseResultsParseSyntaxException ParserElement QuotedStringRecursiveGrammarExceptionRegexSkipTo StringEnd StringStartSuppressTokenTokenConverterWhiteWordWordEnd WordStart ZeroOrMore alphanumsalphas alphas8bit anyCloseTag anyOpenTag cStyleCommentcolcommaSeparatedListcommonHTMLEntity countedArraycppStyleCommentdblQuotedStringdblSlashComment delimitedListdictOfdowncaseTokensemptyhexnums htmlCommentjavaStyleCommentlinelineEnd lineStartlineno makeHTMLTags makeXMLTagsmatchOnlyAtColmatchPreviousExprmatchPreviousLiteral nestedExprnullDebugActionnumsoneOfopAssocoperatorPrecedence printablespunc8bitpythonStyleComment quotedString removeQuotesreplaceHTMLEntity replaceWith restOfLinesglQuotedStringsrange stringEnd stringStarttraceParseAction unicodeString upcaseTokens withAttribute indentedBlockoriginalTextForungroup infixNotation locatedExpr withClass CloseMatchtokenMappyparsing_commonc Cs`t|tr|Syt|Stk rZt|jtjd}td}|jdd|j |SXdS)aDrop-in replacement for str(obj) that tries to be Unicode friendly. It first tries str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It then < returns the unicode object | encodes it with the default encoding | ... >. xmlcharrefreplacez&#\d+;cSs$dtt|dddddS)Nz\ur)hexint)trw/usr/lib/python3.6/pyparsing.pysz_ustr..N) isinstanceZunicodestrUnicodeEncodeErrorencodesysgetdefaultencodingr'setParseActiontransformString)objretZ xmlcharrefrwrwrx_ustrs rz6sum len sorted reversed list tuple set any all min maxccs|] }|VqdS)Nrw).0yrwrwrx srrrcCs>d}dddjD}x"t||D]\}}|j||}q"W|S)z/Escape &, <, >, ", ', etc. 
in a string of data.z&><"'css|]}d|dVqdS)&;Nrw)rsrwrwrxrsz_xml_escape..zamp gt lt quot apos)splitzipreplace)dataZ from_symbolsZ to_symbolsZfrom_Zto_rwrwrx _xml_escapes rc@s eZdZdS) _ConstantsN)__name__ __module__ __qualname__rwrwrwrxrsr 0123456789Z ABCDEFabcdef\ccs|]}|tjkr|VqdS)N)stringZ whitespace)rcrwrwrxrsc@sPeZdZdZdddZeddZdd Zd d Zd d Z dddZ ddZ dS)rz7base exception class for all parsing runtime exceptionsrNcCs>||_|dkr||_d|_n ||_||_||_|||f|_dS)Nr)locmsgpstr parserElementargs)selfrrrelemrwrwrx__init__szParseBaseException.__init__cCs||j|j|j|jS)z internal factory method to simplify creating one type of ParseException from another - avoids having __init__ signature conflicts among subclasses )rrrr)clsperwrwrx_from_exceptionsz"ParseBaseException._from_exceptioncCsN|dkrt|j|jS|dkr,t|j|jS|dkrBt|j|jSt|dS)zsupported attributes by name are: - lineno - returns the line number of the exception text - col - returns the column number of the exception text - line - returns the line containing the exception text rJr9columnrGN)r9r)rJrrr9rGAttributeError)rZanamerwrwrx __getattr__szParseBaseException.__getattr__cCsd|j|j|j|jfS)Nz"%s (at char %d), (line:%d, col:%d))rrrJr)rrwrwrx__str__szParseBaseException.__str__cCst|S)N)r)rrwrwrx__repr__szParseBaseException.__repr__>!} ('-' operator) indicates that parsing is to stop immediately because an unbacktrackable syntax error has been foundN)rrrrrwrwrwrxr#sc@s eZdZdZddZddZdS)r&zZexception thrown by L{ParserElement.validate} if the grammar could be improperly recursivecCs ||_dS)N)parseElementTrace)rparseElementListrwrwrxrsz"RecursiveGrammarException.__init__cCs d|jS)NzRecursiveGrammarException: %s)r)rrwrwrxr sz!RecursiveGrammarException.__str__N)rrrrrrrwrwrwrxr&sc@s,eZdZddZddZddZddZd S) _ParseResultsWithOffsetcCs||f|_dS)N)tup)rZp1Zp2rwrwrxr$sz _ParseResultsWithOffset.__init__cCs |j|S)N)r)rirwrwrx __getitem__&sz#_ParseResultsWithOffset.__getitem__cCst|jdS)Nr)reprr)rrwrwrxr(sz _ParseResultsWithOffset.__repr__cCs|jd|f|_dS)Nr)r)rrrwrwrx 
setOffset*sz!_ParseResultsWithOffset.setOffsetN)rrrrrrrrwrwrwrxr#src@seZdZdZd[ddZddddefddZdd Zefd d Zd d Z ddZ ddZ ddZ e Z ddZddZddZddZddZereZeZeZn$eZeZeZddZd d!Zd"d#Zd$d%Zd&d'Zd\d(d)Zd*d+Zd,d-Zd.d/Zd0d1Z d2d3Z!d4d5Z"d6d7Z#d8d9Z$d:d;Z%d} - see L{ParserElement.setResultsName}) Example:: integer = Word(nums) date_str = (integer.setResultsName("year") + '/' + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") # parseString returns a ParseResults object result = date_str.parseString("1999/12/31") def test(s, fn=repr): print("%s -> %s" % (s, fn(eval(s)))) test("list(result)") test("result[0]") test("result['month']") test("result.day") test("'month' in result") test("'minutes' in result") test("result.dump()", str) prints:: list(result) -> ['1999', '/', '12', '/', '31'] result[0] -> '1999' result['month'] -> '12' result.day -> '31' 'month' in result -> True 'minutes' in result -> False result.dump() -> ['1999', '/', '12', '/', '31'] - day: 31 - month: 12 - year: 1999 NTcCs"t||r|Stj|}d|_|S)NT)rzobject__new___ParseResults__doinit)rtoklistnameasListmodalZretobjrwrwrxrTs   zParseResults.__new__c Cs`|jrvd|_d|_d|_i|_||_||_|dkr6g}||trP|dd|_n||trft||_n|g|_t |_ |dk o|r\|sd|j|<||t rt |}||_||t dttfo|ddgfks\||tr|g}|r&||trt|jd||<ntt|dd||<|||_n6y|d||<Wn$tttfk rZ|||<YnXdS)NFrr)r_ParseResults__name_ParseResults__parent_ParseResults__accumNames_ParseResults__asList_ParseResults__modallist_ParseResults__toklist_generatorTypedict_ParseResults__tokdictrurr basestringr"rcopyKeyError TypeError IndexError)rrrrrrzrwrwrxr]sB     $   zParseResults.__init__cCsPt|ttfr|j|S||jkr4|j|ddStdd|j|DSdS)NrrrcSsg|] }|dqS)rrw)rvrwrwrx sz,ParseResults.__getitem__..rs)rzruslicerrrr")rrrwrwrxrs   zParseResults.__getitem__cCs||tr0|jj|t|g|j|<|d}nD||ttfrN||j|<|}n&|jj|tt|dg|j|<|}||trt||_ dS)Nr) rrgetrrurrr"wkrefr)rkrrzsubrwrwrx __setitem__s   " zParseResults.__setitem__c 
Cst|ttfrt|j}|j|=t|trH|dkr:||7}t||d}tt|j|}|jx^|j j D]F\}}x<|D]4}x.t |D]"\}\}} t || | |k||<qWq|WqnWn|j |=dS)Nrrr) rzrurlenrrrangeindicesreverseritems enumerater) rrZmylenZremovedr occurrencesjrvaluepositionrwrwrx __delitem__s   $zParseResults.__delitem__cCs ||jkS)N)r)rrrwrwrx __contains__szParseResults.__contains__cCs t|jS)N)rr)rrwrwrx__len__szParseResults.__len__cCs |j S)N)r)rrwrwrx__bool__szParseResults.__bool__cCs t|jS)N)iterr)rrwrwrx__iter__szParseResults.__iter__cCst|jdddS)Nrrrs)rr)rrwrwrx __reversed__szParseResults.__reversed__cCs$t|jdr|jjSt|jSdS)Niterkeys)hasattrrrr)rrwrwrx _iterkeyss  zParseResults._iterkeyscsfddjDS)Nc3s|]}|VqdS)Nrw)rr)rrwrxrsz+ParseResults._itervalues..)r)rrw)rrx _itervaluesszParseResults._itervaluescsfddjDS)Nc3s|]}||fVqdS)Nrw)rr)rrwrxrsz*ParseResults._iteritems..)r)rrw)rrx _iteritemsszParseResults._iteritemscCs t|jS)zVReturns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).)rr)rrwrwrxkeysszParseResults.keyscCs t|jS)zXReturns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).)r itervalues)rrwrwrxvaluesszParseResults.valuescCs t|jS)zfReturns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).)r iteritems)rrwrwrxrszParseResults.itemscCs t|jS)zSince keys() returns an iterator, this method is helpful in bypassing code that looks for the existence of any defined results names.)boolr)rrwrwrxhaskeysszParseResults.haskeyscOs|s dg}x6|jD]*\}}|dkr2|d|f}qtd|qWt|dtsht|dksh|d|kr|d}||}||=|S|d}|SdS)a Removes and returns item at specified index (default=C{last}). Supports both C{list} and C{dict} semantics for C{pop()}. If passed no argument or an integer argument, it will use C{list} semantics and pop tokens from the list of parsed tokens. If passed a non-integer argument (most likely a string), it will use C{dict} semantics and pop the corresponding value from any defined results names. 
A second default return value argument is supported, just as in C{dict.pop()}. Example:: def remove_first(tokens): tokens.pop(0) print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] label = Word(alphas) patt = label("LABEL") + OneOrMore(Word(nums)) print(patt.parseString("AAB 123 321").dump()) # Use pop() in a parse action to remove named result (note that corresponding value is not # removed from list form of results) def remove_LABEL(tokens): tokens.pop("LABEL") return tokens patt.addParseAction(remove_LABEL) print(patt.parseString("AAB 123 321").dump()) prints:: ['AAB', '123', '321'] - LABEL: AAB ['AAB', '123', '321'] rrdefaultrz-pop() got an unexpected keyword argument '%s'Nrs)rrrzrur)rrkwargsrrindexrZ defaultvaluerwrwrxpops"  zParseResults.popcCs||kr||S|SdS)ai Returns named result matching the given key, or if there is no such name, then returns the given C{defaultValue} or C{None} if no C{defaultValue} is specified. Similar to C{dict.get()}. Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString("1999/12/31") print(result.get("year")) # -> '1999' print(result.get("hour", "not specified")) # -> 'not specified' print(result.get("hour")) # -> None Nrw)rkey defaultValuerwrwrxrszParseResults.getcCsZ|jj||xF|jjD]8\}}x.t|D]"\}\}}t||||k||<q,WqWdS)a Inserts new element at location index in the list of parsed tokens. Similar to C{list.insert()}. 
Example:: print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to insert the parse location in the front of the parsed results def insert_locn(locn, tokens): tokens.insert(0, locn) print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] N)rinsertrrrr)rrZinsStrrrrrrrwrwrxr2szParseResults.insertcCs|jj|dS)a Add single element to end of ParseResults list of elements. Example:: print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to compute the sum of the parsed integers, and add it to the end def append_sum(tokens): tokens.append(sum(map(int, tokens))) print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] N)rappend)ritemrwrwrxrFs zParseResults.appendcCs$t|tr||7}n |jj|dS)a Add sequence of elements to end of ParseResults list of elements. Example:: patt = OneOrMore(Word(alphas)) # use a parse action to append the reverse of the matched strings, to make a palindrome def make_palindrome(tokens): tokens.extend(reversed([t[::-1] for t in tokens])) return ''.join(tokens) print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' N)rzr"rextend)rZitemseqrwrwrxrTs  zParseResults.extendcCs|jdd=|jjdS)z7 Clear all elements and results names. 
N)rrclear)rrwrwrxrfs zParseResults.clearc Csfy||Stk rdSX||jkr^||jkrD|j|ddStdd|j|DSndSdS)NrrrrcSsg|] }|dqS)rrw)rrrwrwrxrwsz,ParseResults.__getattr__..rs)rrrr")rrrwrwrxrms  zParseResults.__getattr__cCs|j}||7}|S)N)r)rotherrrwrwrx__add__{szParseResults.__add__cs|jrnt|jfdd|jj}fdd|D}x4|D],\}}|||<t|dtr>t||d_q>W|j|j7_|jj |j|S)Ncs|dkr S|S)Nrrw)a)offsetrwrxrysz'ParseResults.__iadd__..c s4g|],\}}|D]}|t|d|dfqqS)rrr)r)rrvlistr) addoffsetrwrxrsz)ParseResults.__iadd__..r) rrrrrzr"rrrupdate)rrZ otheritemsZotherdictitemsrrrw)rrrx__iadd__s    zParseResults.__iadd__cCs&t|tr|dkr|jS||SdS)Nr)rzrur)rrrwrwrx__radd__szParseResults.__radd__cCsdt|jt|jfS)Nz(%s, %s))rrr)rrwrwrxrszParseResults.__repr__cCsddjdd|jDdS)N[z, css(|] }t|trt|nt|VqdS)N)rzr"rr)rrrwrwrxrsz'ParseResults.__str__..])rr)rrwrwrxrszParseResults.__str__rcCsPg}xF|jD]<}|r"|r"|j|t|tr:||j7}q |jt|q W|S)N)rrrzr" _asStringListr)rsepoutrrwrwrxr s   zParseResults._asStringListcCsdd|jDS)a Returns the parse results as a nested list of matching tokens, all converted to strings. Example:: patt = OneOrMore(Word(alphas)) result = patt.parseString("sldkj lsdkj sldkj") # even though the result prints in string-like form, it is actually a pyparsing ParseResults print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] # Use asList() to create an actual list result_list = result.asList() print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] cSs"g|]}t|tr|jn|qSrw)rzr"r)rresrwrwrxrsz'ParseResults.asList..)r)rrwrwrxrszParseResults.asListcs6tr |j}n|j}fddtfdd|DS)a Returns the named parse results as a nested dictionary. 
Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString('12/31/1999') print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) result_dict = result.asDict() print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} # even though a ParseResults supports dict-like access, sometime you just need to have a dict import json print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} cs6t|tr.|jr|jSfdd|DSn|SdS)Ncsg|] }|qSrwrw)rr)toItemrwrxrsz7ParseResults.asDict..toItem..)rzr"rasDict)r)rrwrxrs  z#ParseResults.asDict..toItemc3s|]\}}||fVqdS)Nrw)rrr)rrwrxrsz&ParseResults.asDict..)PY_3rrr)rZitem_fnrw)rrxrs  zParseResults.asDictcCs8t|j}|jj|_|j|_|jj|j|j|_|S)zA Returns a new copy of a C{ParseResults} object. )r"rrrrrrr)rrrwrwrxrs   zParseResults.copyFc CsPd}g}tdd|jjD}|d}|s8d}d}d}d} |dk rJ|} n |jrV|j} | sf|rbdSd} |||d| d g7}xt|jD]\} } t| tr| |kr|| j|| |o|dk||g7}n|| jd|o|dk||g7}qd} | |kr|| } | s |rqnd} t t | } |||d| d | d | d g 7}qW|||d | d g7}dj |S) z (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.  css(|] \}}|D]}|d|fVqqdS)rrNrw)rrrrrwrwrxrsz%ParseResults.asXML..z rNZITEM<>z.z %s%s- %s: z rrcss|]}t|tVqdS)N)rzr")rvvrwrwrxrssz %s%s[%d]: %s%s%sr) rrrrsortedrrzr"dumpranyrr) rrdepthfullr NLrrrrrrwrwrxrPs,   4.zParseResults.dumpcOstj|jf||dS)a Pretty-printer for parsed results as a list, using the C{pprint} module. Accepts additional positional or keyword args as defined for the C{pprint.pprint} method. 
(U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) Example:: ident = Word(alphas, alphanums) num = Word(nums) func = Forward() term = ident | num | Group('(' + func + ')') func <<= ident + Group(Optional(delimitedList(term))) result = func.parseString("fna a,b,(fnb c,d,200),100") result.pprint(width=40) prints:: ['fna', ['a', 'b', ['(', 'fnb', ['c', 'd', '200'], ')'], '100']] N)pprintr)rrrrwrwrxr"}szParseResults.pprintcCs.|j|jj|jdk r|jp d|j|jffS)N)rrrrrr)rrwrwrx __getstate__s zParseResults.__getstate__cCsN|d|_|d\|_}}|_i|_|jj||dk rDt||_nd|_dS)Nrrr)rrrrrrr)rstaterZ inAccumNamesrwrwrx __setstate__s   zParseResults.__setstate__cCs|j|j|j|jfS)N)rrrr)rrwrwrx__getnewargs__szParseResults.__getnewargs__cCstt|t|jS)N)rrrr)rrwrwrxrszParseResults.__dir__)NNTT)N)r)NFrT)rrT)4rrrrrrzrrrrrrr __nonzero__rrrrrrrrrrrrrrrrrrrrrrrrrr rrrrrrrr"r#r%r&rrwrwrwrxr"-sh& ' 4  # =% - cCsF|}d|kot|knr4||ddkr4dS||jdd|S)aReturns current column within a string, counting newlines as line separators. The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}} for more information on parsing strings containing C{}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. rrrr)rrfind)rstrgrrwrwrxr9s cCs|jdd|dS)aReturns current line number within a string, counting newlines as line separators. The first line is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}} for more information on parsing strings containing C{}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. 
rrrr)count)rr)rwrwrxrJs cCsF|jdd|}|jd|}|dkr2||d|S||ddSdS)zfReturns the line of text containing loc within a string, counting newlines as line separators. rrrrN)r(find)rr)ZlastCRZnextCRrwrwrxrGs  cCs8tdt|dt|dt||t||fdS)NzMatch z at loc z(%d,%d))printrrJr9)instringrexprrwrwrx_defaultStartDebugActionsr/cCs$tdt|dt|jdS)NzMatched z -> )r,rr{r)r-startlocZendlocr.toksrwrwrx_defaultSuccessDebugActionsr2cCstdt|dS)NzException raised:)r,r)r-rr.excrwrwrx_defaultExceptionDebugActionsr4cGsdS)zG'Do-nothing' debug action, to suppress debugging output during parsing.Nrw)rrwrwrxrQsrqc stkrfddSdgdgtdddkrFddd }dd d n tj}tjd }|dd d}|d|d|ffdd}d}ytdtdj}Wntk rt}YnX||_|S)Ncs|S)Nrw)rlrv)funcrwrxrysz_trim_arity..rFrqrocSs8tdkr dnd }tj| |dd|}|j|jfgS) Nror7rrqrr)limit)ror7r)system_version traceback extract_stackfilenamerJ)r8r frame_summaryrwrwrxr=sz"_trim_arity..extract_stackcSs$tj||d}|d}|j|jfgS)N)r8rrrs)r< extract_tbr>rJ)tbr8Zframesr?rwrwrxr@sz_trim_arity..extract_tb)r8rrcsxy |dd}dd<|Stk rdr>n4z.tjd}|dddddksjWd~Xdkrdd7<wYqXqWdS)NrTrrrq)r8rsrs)rr~exc_info)rrrA)r@ foundArityr6r8maxargspa_call_line_synthrwrxwrappers"  z_trim_arity..wrapperzr __class__)ror7)r)rrs) singleArgBuiltinsr;r<r=r@getattrr Exceptionr{)r6rEr=Z LINE_DIFFZ this_linerG func_namerw)r@rDr6r8rErFrx _trim_aritys*   rMcseZdZdZdZdZeddZeddZddd Z d d Z d d Z dddZ dddZ ddZddZddZddZddZddZddd Zd!d"Zdd#d$Zd%d&Zd'd(ZGd)d*d*eZed+k rGd,d-d-eZnGd.d-d-eZiZeZd/d/gZ dd0d1Z!eZ"ed2d3Z#dZ$edd5d6Z%dd7d8Z&e'dfd9d:Z(d;d<Z)e'fd=d>Z*e'dfd?d@Z+dAdBZ,dCdDZ-dEdFZ.dGdHZ/dIdJZ0dKdLZ1dMdNZ2dOdPZ3dQdRZ4dSdTZ5dUdVZ6dWdXZ7dYdZZ8dd[d\Z9d]d^Z:d_d`Z;dadbZdgdhZ?ddidjZ@dkdlZAdmdnZBdodpZCdqdrZDgfdsdtZEddudvZFfdwdxZGdydzZHd{d|ZId}d~ZJddZKdddZLdddZMZNS)r$z)Abstract base level parser element class.z FcCs |t_dS)a Overrides the default whitespace chars Example:: # default whitespace chars are space, and newline OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] # change to just treat newline as significant 
ParserElement.setDefaultWhitespaceChars(" \t") OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] N)r$DEFAULT_WHITE_CHARS)charsrwrwrxsetDefaultWhitespaceChars=s z'ParserElement.setDefaultWhitespaceCharscCs |t_dS)a Set class to be used for inclusion of string literals into a parser. Example:: # default literal class used is Literal integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] # change to Suppress ParserElement.inlineLiteralsUsing(Suppress) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] N)r$_literalStringClass)rrwrwrxinlineLiteralsUsingLsz!ParserElement.inlineLiteralsUsingcCst|_d|_d|_d|_||_d|_tj|_ d|_ d|_ d|_ t|_ d|_d|_d|_d|_d|_d|_d|_d|_d|_dS)NTFr)NNN)r parseAction failActionstrRepr resultsName saveAsListskipWhitespacer$rN whiteCharscopyDefaultWhiteCharsmayReturnEmptykeepTabs ignoreExprsdebug streamlined mayIndexErrorerrmsg modalResults debugActionsre callPreparse callDuringTry)rsavelistrwrwrxras(zParserElement.__init__cCs<tj|}|jdd|_|jdd|_|jr8tj|_|S)a$ Make a copy of this C{ParserElement}. Useful for defining different parse actions for the same parsing pattern, using copies of the original parse element. 
Example:: integer = Word(nums).setParseAction(lambda toks: int(toks[0])) integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) prints:: [5120, 100, 655360, 268435456] Equivalent form of C{expr.copy()} is just C{expr()}:: integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") N)rrSr]rZr$rNrY)rZcpyrwrwrxrxs  zParserElement.copycCs*||_d|j|_t|dr&|j|j_|S)af Define name for this expression, makes debugging and exception messages clearer. Example:: Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) z Expected exception)rrarrhr)rrrwrwrxsetNames    zParserElement.setNamecCs4|j}|jdr"|dd}d}||_| |_|S)aP Define name for referencing matching tokens as a nested attribute of the returned parse results. NOTE: this returns a *copy* of the original C{ParserElement} object; this is so that the client can define a basic element, such as an integer, and reference it in multiple places with different names. You can also set results names using the abbreviated syntax, C{expr("name")} in place of C{expr.setResultsName("name")} - see L{I{__call__}<__call__>}. Example:: date_str = (integer.setResultsName("year") + '/' + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: date_str = integer("year") + '/' + integer("month") + '/' + integer("day") *NrrTrs)rendswithrVrb)rrlistAllMatchesZnewselfrwrwrxsetResultsNames  zParserElement.setResultsNameTcs@|r&|jdfdd }|_||_nt|jdr<|jj|_|S)zMethod to invoke the Python pdb debugger when this element is about to be parsed. Set C{breakFlag} to True to enable, False to disable. 
Tcsddl}|j||||S)Nr)pdbZ set_trace)r-r doActions callPreParsern) _parseMethodrwrxbreakersz'ParserElement.setBreak..breaker_originalParseMethod)TT)_parsersr)rZ breakFlagrrrw)rqrxsetBreaks  zParserElement.setBreakcOs&tttt||_|jdd|_|S)a  Define action to perform when successfully matching parse element definition. Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: - s = the original string being parsed (see note below) - loc = the location of the matching substring - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object If the functions in fns modify the tokens, they can return them as the return value from fn, and the modified list of tokens will replace the original. Otherwise, fn does not need to return any value. Optional keyword arguments: - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{parseString}} for more information on parsing strings containing C{}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. Example:: integer = Word(nums) date_str = integer + '/' + integer + '/' + integer date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] # use parse action to convert to ints at parse time integer = Word(nums).setParseAction(lambda toks: int(toks[0])) date_str = integer + '/' + integer + '/' + integer # note that integer fields are now ints, not strings date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] rfF)rmaprMrSrrf)rfnsrrwrwrxrs"zParserElement.setParseActioncOs4|jtttt|7_|jp,|jdd|_|S)z Add parse action to expression's list of parse actions. See L{I{setParseAction}}. See examples in L{I{copy}}. 
rfF)rSrrvrMrfr)rrwrrwrwrxaddParseActionszParserElement.addParseActioncsb|jdd|jddrtntx(|D] fdd}|jj|q&W|jpZ|jdd|_|S)aAdd a boolean predicate function to expression's list of parse actions. See L{I{setParseAction}} for function call signatures. Unlike C{setParseAction}, functions passed to C{addCondition} need to return boolean success/fail of the condition. Optional keyword arguments: - message = define a custom message to be used in the raised exception - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException Example:: integer = Word(nums).setParseAction(lambda toks: int(toks[0])) year_int = integer.copy() year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") date_str = year_int + '/' + integer + '/' + integer result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) messagezfailed user-defined conditionfatalFcs$tt|||s ||dS)N)rrM)rr5rv)exc_typefnrrwrxpasz&ParserElement.addCondition..parf)rr!rrSrrf)rrwrr}rw)r{r|rrx addConditions  zParserElement.addConditioncCs ||_|S)a Define action to perform if parsing fails at this expression. Fail acton fn is a callable function that takes the arguments C{fn(s,loc,expr,err)} where: - s = string being parsed - loc = location where expression match was attempted and failed - expr = the parse expression that failed - err = the exception thrown The function returns no value. 
It may throw C{L{ParseFatalException}} if it is desired to stop parsing immediately.)rT)rr|rwrwrx setFailActions zParserElement.setFailActionc CsZd}xP|rTd}xB|jD]8}yx|j||\}}d}qWWqtk rLYqXqWqW|S)NTF)r]rtr)rr-rZ exprsFoundeZdummyrwrwrx_skipIgnorables#s  zParserElement._skipIgnorablescCsL|jr|j||}|jrH|j}t|}x ||krF|||krF|d7}q(W|S)Nrr)r]rrXrYr)rr-rZwtinstrlenrwrwrxpreParse0s  zParserElement.preParsecCs|gfS)Nrw)rr-rrorwrwrx parseImpl<szParserElement.parseImplcCs|S)Nrw)rr-r tokenlistrwrwrx postParse?szParserElement.postParsec "Cs|j}|s|jr|jdr,|jd||||rD|jrD|j||}n|}|}yDy|j|||\}}Wn(tk rt|t||j |YnXWnXt k r} z<|jdr|jd|||| |jr|j|||| WYdd} ~ XnXn|o|jr|j||}n|}|}|j s$|t|krhy|j|||\}}Wn*tk rdt|t||j |YnXn|j|||\}}|j |||}t ||j|j|jd} |jr|s|jr|rVyRxL|jD]B} | ||| }|dk rt ||j|jot|t tf|jd} qWWnFt k rR} z(|jdr@|jd|||| WYdd} ~ XnXnNxL|jD]B} | ||| }|dk r^t ||j|jot|t tf|jd} q^W|r|jdr|jd||||| || fS)Nrrq)rrrr)r^rTrcrerrrrrrarr`rr"rVrWrbrSrfrzr) rr-rrorpZ debuggingprelocZ tokensStarttokenserrZ retTokensr|rwrwrx _parseNoCacheCsp             zParserElement._parseNoCachec Cs>y|j||dddStk r8t|||j|YnXdS)NF)ror)rtr!rra)rr-rrwrwrxtryParseszParserElement.tryParsec Cs2y|j||Wnttfk r(dSXdSdS)NFT)rrr)rr-rrwrwrx canParseNexts zParserElement.canParseNextc@seZdZddZdS)zParserElement._UnboundedCachecsdit|_fdd}fdd}fdd}tj|||_tj|||_tj|||_dS)Ncs j|S)N)r)rr)cache not_in_cacherwrxrsz3ParserElement._UnboundedCache.__init__..getcs ||<dS)Nrw)rrr)rrwrxsetsz3ParserElement._UnboundedCache.__init__..setcs jdS)N)r)r)rrwrxrsz5ParserElement._UnboundedCache.__init__..clear)rrtypes MethodTyperrr)rrrrrw)rrrxrs   z&ParserElement._UnboundedCache.__init__N)rrrrrwrwrwrx_UnboundedCachesrNc@seZdZddZdS)zParserElement._FifoCachecsht|_tfdd}fdd}fdd}tj|||_tj|||_tj|||_dS)Ncs j|S)N)r)rr)rrrwrxrsz.ParserElement._FifoCache.__init__..getcs"||<tkrjddS)NF)rpopitem)rrr)rsizerwrxrs z.ParserElement._FifoCache.__init__..setcs jdS)N)r)r)rrwrxrsz0ParserElement._FifoCache.__init__..clear)rr _OrderedDictrrrrr)rrrrrrw)rrrrxrs  
z!ParserElement._FifoCache.__init__N)rrrrrwrwrwrx _FifoCachesrc@seZdZddZdS)zParserElement._FifoCachecsvt|_itjgfdd}fdd}fdd}tj|||_tj|||_tj|||_dS)Ncs j|S)N)r)rr)rrrwrxrsz.ParserElement._FifoCache.__init__..getcs2||<tkr$jjdj|dS)N)rrpopleftr)rrr)rkey_fiforrwrxrs z.ParserElement._FifoCache.__init__..setcsjjdS)N)r)r)rrrwrxrsz0ParserElement._FifoCache.__init__..clear) rr collectionsdequerrrrr)rrrrrrw)rrrrrxrs  z!ParserElement._FifoCache.__init__N)rrrrrwrwrwrxrsrc Csd\}}|||||f}tjtj}|j|} | |jkrtj|d7<y|j||||} Wn8tk r} z|j|| j | j WYdd} ~ XqX|j|| d| dj f| Sn4tj|d7<t | t r| | d| dj fSWdQRXdS)Nrrr)rrr)r$packrat_cache_lock packrat_cacherrpackrat_cache_statsrrrrHrrrzrK) rr-rrorpZHITZMISSlookuprrrrwrwrx _parseCaches$   zParserElement._parseCachecCs(tjjdgttjtjdd<dS)Nr)r$rrrrrwrwrwrx resetCaches zParserElement.resetCachecCs8tjs4dt_|dkr tjt_n tj|t_tjt_dS)aEnables "packrat" parsing, which adds memoizing to the parsing logic. Repeated parse attempts at the same string location (which happens often in many complex grammars) can immediately return a cached value, instead of re-executing parsing/validating code. Memoizing is done of both valid results and parsing exceptions. Parameters: - cache_size_limit - (default=C{128}) - if an integer value is provided will limit the size of the packrat cache; if None is passed, then the cache size will be unbounded; if 0 is passed, the cache will be effectively disabled. This speedup may break existing programs that use parse actions that have side-effects. For this reason, packrat parsing is disabled when you first import pyparsing. To activate the packrat feature, your program must call the class method C{ParserElement.enablePackrat()}. If your program uses C{psyco} to "compile as you go", you must call C{enablePackrat} before calling C{psyco.full()}. If you do not do this, Python will crash. For best results, call C{enablePackrat()} immediately after importing pyparsing. 
Example:: import pyparsing pyparsing.ParserElement.enablePackrat() TN)r$_packratEnabledrrrrrt)Zcache_size_limitrwrwrx enablePackrats   zParserElement.enablePackratcCstj|js|jx|jD] }|jqW|js<|j}y<|j|d\}}|rv|j||}t t }|j||Wn0t k r}ztj rn|WYdd}~XnX|SdS)aB Execute the parse expression with the given string. This is the main interface to the client code, once the complete expression has been built. If you want the grammar to require that the entire input string be successfully parsed, then set C{parseAll} to True (equivalent to ending the grammar with C{L{StringEnd()}}). Note: C{parseString} implicitly calls C{expandtabs()} on the input string, in order to report proper column numbers in parse actions. If the input string contains tabs and the grammar uses parse actions that use the C{loc} argument to index into the string being parsed, you can ensure you have a consistent view of the input string by: - calling C{parseWithTabs} on your grammar before calling C{parseString} (see L{I{parseWithTabs}}) - define your parse action using the full C{(s,loc,toks)} signature, and reference the input string using the parse action's C{s} argument - explictly expand the tabs in your input string before calling C{parseString} Example:: Word('a').parseString('aaaaabaaa') # -> ['aaaaa'] Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text rN) r$rr_ streamliner]r\ expandtabsrtrr r)rverbose_stacktrace)rr-parseAllrrrZser3rwrwrx parseString#s$    zParserElement.parseStringccs@|js|jx|jD] }|jqW|js8t|j}t|}d}|j}|j}t j d} yx||kon| |kry |||} ||| dd\} } Wnt k r| d}Yq`X| |kr| d7} | | | fV|r|||} | |kr| }q|d7}n| }q`| d}q`WWn4t k r:}zt j r&n|WYdd}~XnXdS)a Scan the input string for expression matches. Each match will return the matching tokens, start location, and end location. May be called with optional C{maxMatches} argument, to clip scanning after 'n' matches are found. If C{overlap} is specified, then overlapping matches will be reported. 
Note that the start and end locations are reported relative to the string being parsed. See L{I{parseString}} for more information on parsing strings with embedded tabs. Example:: source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" print(source) for tokens,start,end in Word(alphas).scanString(source): print(' '*start + '^'*(end-start)) print(' '*start + tokens[0]) prints:: sldjf123lsdjjkf345sldkjf879lkjsfd987 ^^^^^ sldjf ^^^^^^^ lsdjjkf ^^^^^^ sldkjf ^^^^^^ lkjsfd rF)rprrN)r_rr]r\rrrrrtr$rrrr)rr- maxMatchesZoverlaprrrZ preparseFnZparseFnmatchesrZnextLocrZnextlocr3rwrwrx scanStringUsB       zParserElement.scanStringcCsg}d}d|_yxh|j|D]Z\}}}|j||||rrt|trT||j7}nt|trh||7}n |j||}qW|j||ddd|D}djtt t |St k r}zt j rȂn|WYdd}~XnXdS)af Extension to C{L{scanString}}, to modify matching text with modified tokens that may be returned from a parse action. To use C{transformString}, define a grammar and attach a parse action to it that modifies the returned token list. Invoking C{transformString()} on a target string will then scan for matches, and replace the matched text patterns according to the logic in the parse action. C{transformString()} returns the resulting transformed string. Example:: wd = Word(alphas) wd.setParseAction(lambda toks: toks[0].title()) print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) Prints:: Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. rTNcSsg|] }|r|qSrwrw)rorwrwrxrsz1ParserElement.transformString..r)r\rrrzr"rrrrvr_flattenrr$r)rr-r ZlastErvrrr3rwrwrxrs(    zParserElement.transformStringcCsPytdd|j||DStk rJ}ztjr6n|WYdd}~XnXdS)a~ Another extension to C{L{scanString}}, simplifying the access to the tokens found to match the given parse expression. May be called with optional C{maxMatches} argument, to clip searching after 'n' matches are found. 
Example:: # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters cap_word = Word(alphas.upper(), alphas.lower()) print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) prints:: ['More', 'Iron', 'Lead', 'Gold', 'I'] cSsg|]\}}}|qSrwrw)rrvrrrwrwrxrsz.ParserElement.searchString..N)r"rrr$r)rr-rr3rwrwrx searchStrings zParserElement.searchStringc csXd}d}x<|j||dD]*\}}}|||V|r>|dV|}qW||dVdS)a[ Generator method to split a string using the given expression as a separator. May be called with optional C{maxsplit} argument, to limit the number of splits; and the optional C{includeSeparators} argument (default=C{False}), if the separating matching text should be included in the split results. Example:: punc = oneOf(list(".,;:/-!?")) print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) prints:: ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] r)rN)r) rr-maxsplitZincludeSeparatorsZsplitsZlastrvrrrwrwrxrs  zParserElement.splitcCsFt|trtj|}t|ts:tjdt|tdddSt||gS)a Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement converts them to L{Literal}s by default. Example:: greet = Word(alphas) + "," + Word(alphas) + "!" hello = "Hello, World!" print (hello, "->", greet.parseString(hello)) Prints:: Hello, World! 
-> ['Hello', ',', 'World', '!'] z4Cannot combine element of type %s with ParserElementrq) stacklevelN) rzrr$rQwarningswarnr SyntaxWarningr)rrrwrwrxrs    zParserElement.__add__cCsBt|trtj|}t|ts:tjdt|tdddS||S)z] Implementation of + operator when left operand is not a C{L{ParserElement}} z4Cannot combine element of type %s with ParserElementrq)rN)rzrr$rQrrrr)rrrwrwrxrs    zParserElement.__radd__cCsLt|trtj|}t|ts:tjdt|tdddSt|tj |gS)zQ Implementation of - operator, returns C{L{And}} with error stop z4Cannot combine element of type %s with ParserElementrq)rN) rzrr$rQrrrrr _ErrorStop)rrrwrwrx__sub__s    zParserElement.__sub__cCsBt|trtj|}t|ts:tjdt|tdddS||S)z] Implementation of - operator when left operand is not a C{L{ParserElement}} z4Cannot combine element of type %s with ParserElementrq)rN)rzrr$rQrrrr)rrrwrwrx__rsub__ s    zParserElement.__rsub__cst|tr|d}}nt|tr|d dd}|ddkrHd|df}t|dtr|ddkr|ddkrvtS|ddkrtS|dtSnJt|dtrt|dtr|\}}||8}ntdt|dt|dntdt||dkr td|dkrtd||ko2dknrBtd |rfd d |r|dkrt|}ntg||}n|}n|dkr}ntg|}|S) a Implementation of * operator, allows use of C{expr * 3} in place of C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples may also include C{None} as in: - C{expr*(n,None)} or C{expr*(n,)} is equivalent to C{expr*n + L{ZeroOrMore}(expr)} (read as "at least n instances of C{expr}") - C{expr*(None,n)} is equivalent to C{expr*(0,n)} (read as "0 to n instances of C{expr}") - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)} - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)} Note that C{expr*(None,n)} does not raise an exception if more than n exprs exist in the input stream; that is, C{expr*(None,n)} does not enforce a maximum number of expr occurrences. 
If this behavior is desired, then write C{expr*(None,n) + ~expr} rNrqrrz7cannot multiply 'ParserElement' and ('%s','%s') objectsz0cannot multiply 'ParserElement' and '%s' objectsz/cannot multiply ParserElement by negative valuez@second tuple value must be greater or equal to first tuple valuez+cannot multiply ParserElement by 0 or (0,0)cs(|dkrt|dStSdS)Nrr)r)n)makeOptionalListrrwrxr]sz/ParserElement.__mul__..makeOptionalList)NN) rzrutupler2rrr ValueErrorr)rrZ minElementsZ optElementsrrw)rrrx__mul__,sD             zParserElement.__mul__cCs |j|S)N)r)rrrwrwrx__rmul__pszParserElement.__rmul__cCsFt|trtj|}t|ts:tjdt|tdddSt||gS)zI Implementation of | operator - returns C{L{MatchFirst}} z4Cannot combine element of type %s with ParserElementrq)rN) rzrr$rQrrrrr)rrrwrwrx__or__ss    zParserElement.__or__cCsBt|trtj|}t|ts:tjdt|tdddS||BS)z] Implementation of | operator when left operand is not a C{L{ParserElement}} z4Cannot combine element of type %s with ParserElementrq)rN)rzrr$rQrrrr)rrrwrwrx__ror__s    zParserElement.__ror__cCsFt|trtj|}t|ts:tjdt|tdddSt||gS)zA Implementation of ^ operator - returns C{L{Or}} z4Cannot combine element of type %s with ParserElementrq)rN) rzrr$rQrrrrr)rrrwrwrx__xor__s    zParserElement.__xor__cCsBt|trtj|}t|ts:tjdt|tdddS||AS)z] Implementation of ^ operator when left operand is not a C{L{ParserElement}} z4Cannot combine element of type %s with ParserElementrq)rN)rzrr$rQrrrr)rrrwrwrx__rxor__s    zParserElement.__rxor__cCsFt|trtj|}t|ts:tjdt|tdddSt||gS)zC Implementation of & operator - returns C{L{Each}} z4Cannot combine element of type %s with ParserElementrq)rN) rzrr$rQrrrrr )rrrwrwrx__and__s    zParserElement.__and__cCsBt|trtj|}t|ts:tjdt|tdddS||@S)z] Implementation of & operator when left operand is not a C{L{ParserElement}} z4Cannot combine element of type %s with ParserElementrq)rN)rzrr$rQrrrr)rrrwrwrx__rand__s    zParserElement.__rand__cCst|S)zE Implementation of ~ operator - returns C{L{NotAny}} )r)rrwrwrx __invert__szParserElement.__invert__cCs|dk 
r|j|S|jSdS)a  Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}. If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be passed as C{True}. If C{name} is omitted, same as calling C{L{copy}}. Example:: # these are equivalent userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") N)rmr)rrrwrwrx__call__s zParserElement.__call__cCst|S)z Suppresses the output of this C{ParserElement}; useful to keep punctuation from cluttering up returned output. )r+)rrwrwrxsuppressszParserElement.suppresscCs d|_|S)a Disables the skipping of whitespace before matching the characters in the C{ParserElement}'s defined pattern. This is normally only used internally by the pyparsing module, but may be needed in some whitespace-sensitive grammars. F)rX)rrwrwrxleaveWhitespaceszParserElement.leaveWhitespacecCsd|_||_d|_|S)z8 Overrides the default whitespace chars TF)rXrYrZ)rrOrwrwrxsetWhitespaceCharssz ParserElement.setWhitespaceCharscCs d|_|S)z Overrides default behavior to expand C{}s to spaces before parsing the input string. Must be called before C{parseString} when the input grammar contains elements that match C{} characters. T)r\)rrwrwrx parseWithTabsszParserElement.parseWithTabscCsLt|trt|}t|tr4||jkrH|jj|n|jjt|j|S)a Define expression to be ignored (e.g., comments) while doing pattern matching; may be called repeatedly, to define multiple comment or other ignorable patterns. Example:: patt = OneOrMore(Word(alphas)) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] patt.ignore(cStyleComment) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] )rzrr+r]rr)rrrwrwrxignores   zParserElement.ignorecCs"|pt|p t|ptf|_d|_|S)zT Enable display of debugging messages while doing pattern matching. 
T)r/r2r4rcr^)rZ startActionZ successActionZexceptionActionrwrwrxsetDebugActions s  zParserElement.setDebugActionscCs|r|jtttnd|_|S)a Enable display of debugging messages while doing pattern matching. Set C{flag} to True to enable, False to disable. Example:: wd = Word(alphas).setName("alphaword") integer = Word(nums).setName("numword") term = wd | integer # turn on debugging for wd wd.setDebug() OneOrMore(term).parseString("abc 123 xyz 890") prints:: Match alphaword at loc 0(1,1) Matched alphaword -> ['abc'] Match alphaword at loc 3(1,4) Exception raised:Expected alphaword (at char 4), (line:1, col:5) Match alphaword at loc 7(1,8) Matched alphaword -> ['xyz'] Match alphaword at loc 11(1,12) Exception raised:Expected alphaword (at char 12), (line:1, col:13) Match alphaword at loc 15(1,16) Exception raised:Expected alphaword (at char 15), (line:1, col:16) The output shown is that produced by the default debug actions - custom debug actions can be specified using L{setDebugActions}. Prior to attempting to match the C{wd} expression, the debugging message C{"Match at loc (,)"} is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, which makes debugging and exception messages easier to understand - for instance, the default name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. F)rr/r2r4r^)rflagrwrwrxsetDebugs#zParserElement.setDebugcCs|jS)N)r)rrwrwrxr@szParserElement.__str__cCst|S)N)r)rrwrwrxrCszParserElement.__repr__cCsd|_d|_|S)NT)r_rU)rrwrwrxrFszParserElement.streamlinecCsdS)Nrw)rrrwrwrxcheckRecursionKszParserElement.checkRecursioncCs|jgdS)zj Check defined expressions for valid structure, check for infinite recursive definitions. 
N)r)r validateTracerwrwrxvalidateNszParserElement.validatecCsy |j}Wn2tk r>t|d}|j}WdQRXYnXy |j||Stk r|}ztjrhn|WYdd}~XnXdS)z Execute the parse expression on the given file or filename. If a filename is specified (instead of a file object), the entire file is opened, read, and closed before parsing. rN)readropenrrr$r)rZfile_or_filenamerZ file_contentsfr3rwrwrx parseFileTs   zParserElement.parseFilecsHt|tr"||kp t|t|kSt|tr6|j|Stt||kSdS)N)rzr$varsrrsuper)rr)rHrwrx__eq__hs    zParserElement.__eq__cCs ||k S)Nrw)rrrwrwrx__ne__pszParserElement.__ne__cCs tt|S)N)hashid)rrwrwrx__hash__sszParserElement.__hash__cCs||kS)Nrw)rrrwrwrx__req__vszParserElement.__req__cCs ||k S)Nrw)rrrwrwrx__rne__yszParserElement.__rne__c Cs0y|jt||ddStk r*dSXdS)a Method for quick testing of a parser against a test string. Good for simple inline microtests of sub expressions while building up larger parser. Parameters: - testString - to test against this expression for a match - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests Example:: expr = Word(nums) assert expr.matches("100") )rTFN)rrr)rZ testStringrrwrwrxr|s zParserElement.matches#cCst|tr"tttj|jj}t|tr4t|}g}g}d} x|D]} |dk rb|j | dsl|rx| rx|j | qH| s~qHdj || g} g}y:| j dd} |j | |d} | j | j|d| o| } Wntk rx} zt| trdnd }d| kr0| j t| j| | j d t| j| d d |n| j d | jd || j d t| | ob|} | } WYdd} ~ XnDtk r}z&| j dt|| o|} |} WYdd}~XnX|r|r| j d tdj | |j | | fqHW| |fS)a3 Execute the parse expression on a series of test strings, showing each test, the parsed results or where the parse failed. Quick and easy way to run a parse expression against a list of sample strings. 
Parameters: - tests - a list of separate test strings, or a multiline string of test strings - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests - comment - (default=C{'#'}) - expression for indicating embedded comments in the test string; pass None to disable comment filtering - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline; if False, only dump nested list - printResults - (default=C{True}) prints test output to stdout - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing Returns: a (success, results) tuple, where success indicates that all tests succeeded (or failed if C{failureTests} is True), and the results contain a list of lines of each test's output Example:: number_expr = pyparsing_common.number.copy() result = number_expr.runTests(''' # unsigned integer 100 # negative integer -100 # float with scientific notation 6.02e23 # integer with scientific notation 1e-12 ''') print("Success" if result[0] else "Failed!") result = number_expr.runTests(''' # stray character 100Z # missing leading digit before '.' -.100 # too many '.' 3.14.159 ''', failureTests=True) print("Success" if result[0] else "Failed!") prints:: # unsigned integer 100 [100] # negative integer -100 [-100] # float with scientific notation 6.02e23 [6.02e+23] # integer with scientific notation 1e-12 [1e-12] Success # stray character 100Z ^ FAIL: Expected end of text (at char 3), (line:1, col:4) # missing leading digit before '.' -.100 ^ FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) # too many '.' 3.14.159 ^ FAIL: Expected end of text (at char 4), (line:1, col:5) Success Each test string must be on a single line. 
If you want to test a string that spans multiple lines, create a test like this:: expr.runTest(r"this is a test\n of strings that spans \n 3 lines") (Note that this is a raw string literal, you must include the leading 'r'.) TNFrz\n)r)r z(FATAL)r rr^zFAIL: zFAIL-EXCEPTION: )rzrrrvr{rrstrip splitlinesrrrrrrrrr!rGrr9rKr,)rZtestsrZcommentZfullDumpZ printResultsZ failureTestsZ allResultsZcommentssuccessrvr resultrrzr3rwrwrxrunTestssNW     $   zParserElement.runTests)F)F)T)T)TT)TT)r)F)N)T)F)T)TrTTF)OrrrrrNr staticmethodrPrRrrrirmrurrxr~rrrrrrrrrrrrrrrrrrtrrrr_MAX_INTrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr __classcell__rwrw)rHrxr$8s     &     H   " 2G+    D           )    cs eZdZdZfddZZS)r,zT Abstract C{ParserElement} subclass, for defining atomic matching patterns. cstt|jdddS)NF)rg)rr,r)r)rHrwrxr szToken.__init__)rrrrrrrwrw)rHrxr, scs eZdZdZfddZZS)r z, An empty token, will always match. cs$tt|jd|_d|_d|_dS)Nr TF)rr rrr[r`)r)rHrwrxr szEmpty.__init__)rrrrrrrwrw)rHrxr  scs*eZdZdZfddZdddZZS)rz( A token that will never match. cs*tt|jd|_d|_d|_d|_dS)NrTFzUnmatchable token)rrrrr[r`ra)r)rHrwrxr* s zNoMatch.__init__TcCst|||j|dS)N)rra)rr-rrorwrwrxr1 szNoMatch.parseImpl)T)rrrrrrrrwrw)rHrxr& s cs*eZdZdZfddZdddZZS)ra Token to exactly match a specified string. Example:: Literal('blah').parseString('blah') # -> ['blah'] Literal('blah').parseString('blahfooblah') # -> ['blah'] Literal('blah').parseString('bla') # -> Exception: Expected "blah" For case-insensitive matching, use L{CaselessLiteral}. For keyword matching (force word break before and after the matched string), use L{Keyword} or L{CaselessKeyword}. 
c stt|j||_t||_y|d|_Wn*tk rVtj dt ddt |_ YnXdt |j|_d|j|_d|_d|_dS)Nrz2null string passed to Literal; use Empty() insteadrq)rz"%s"z Expected F)rrrmatchrmatchLenfirstMatchCharrrrrr rHrrrar[r`)r matchString)rHrwrxrC s    zLiteral.__init__TcCsJ|||jkr6|jdks&|j|j|r6||j|jfSt|||j|dS)Nrr)rr startswithrrra)rr-rrorwrwrxrV szLiteral.parseImpl)T)rrrrrrrrwrw)rHrxr5 s  csLeZdZdZedZdfdd Zddd Zfd d Ze d d Z Z S)ra\ Token to exactly match a specified string as a keyword, that is, it must be immediately followed by a non-keyword character. Compare with C{L{Literal}}: - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}. - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} Accepts two optional constructor arguments in addition to the keyword string: - C{identChars} is a string of characters that would be valid identifier characters, defaulting to all alphanumerics + "_" and "$" - C{caseless} allows case-insensitive matching, default is C{False}. Example:: Keyword("start").parseString("start") # -> ['start'] Keyword("start").parseString("starting") # -> Exception For case-insensitive matching, use L{CaselessKeyword}. z_$NFc stt|j|dkrtj}||_t||_y|d|_Wn$tk r^t j dt ddYnXd|j|_ d|j |_ d|_d|_||_|r|j|_|j}t||_dS)Nrz2null string passed to Keyword; use Empty() insteadrq)rz"%s"z Expected F)rrrDEFAULT_KEYWORD_CHARSrrrrrrrrrrar[r`caselessupper caselessmatchr identChars)rrrr)rHrwrxrq s&    zKeyword.__init__TcCs|jr|||||jj|jkr|t||jksL|||jj|jkr|dksj||dj|jkr||j|jfSnv|||jkr|jdks|j|j|r|t||jks|||j|jkr|dks||d|jkr||j|jfSt |||j |dS)Nrrr) rrrrrrrrrrra)rr-rrorwrwrxr s*&zKeyword.parseImplcstt|j}tj|_|S)N)rrrrr)rr)rHrwrxr sz Keyword.copycCs |t_dS)z,Overrides the default Keyword chars N)rr)rOrwrwrxsetDefaultKeywordChars szKeyword.setDefaultKeywordChars)NF)T) rrrrr3rrrrrrrrwrw)rHrxr^ s   cs*eZdZdZfddZdddZZS)ral Token to match a specified string, ignoring case of letters. 
Note: the matched results will always be in the case of the given match string, NOT the case of the input text. Example:: OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] (Contrast with example for L{CaselessKeyword}.) cs6tt|j|j||_d|j|_d|j|_dS)Nz'%s'z Expected )rrrr returnStringrra)rr)rHrwrxr s zCaselessLiteral.__init__TcCs@||||jj|jkr,||j|jfSt|||j|dS)N)rrrrrra)rr-rrorwrwrxr szCaselessLiteral.parseImpl)T)rrrrrrrrwrw)rHrxr s  cs,eZdZdZdfdd Zd ddZZS) rz Caseless version of L{Keyword}. Example:: OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] (Contrast with example for L{CaselessLiteral}.) Ncstt|j||dddS)NT)r)rrr)rrr)rHrwrxr szCaselessKeyword.__init__TcCsj||||jj|jkrV|t||jksF|||jj|jkrV||j|jfSt|||j|dS)N)rrrrrrrra)rr-rrorwrwrxr s*zCaselessKeyword.parseImpl)N)T)rrrrrrrrwrw)rHrxr scs,eZdZdZdfdd Zd ddZZS) rlax A variation on L{Literal} which matches "close" matches, that is, strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters: - C{match_string} - string to be matched - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match The results from a successful parse will contain the matched text from the input string and the following named results: - C{mismatches} - a list of the positions within the match_string where mismatches were found - C{original} - the original match_string used to compare against the input string If C{mismatches} is an empty list, then the match was an exact match. 
Example:: patt = CloseMatch("ATCATCGAATGGA") patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) # exact match patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) # close match allowing up to 2 mismatches patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) rrcsBtt|j||_||_||_d|j|jf|_d|_d|_dS)Nz&Expected %r (with up to %d mismatches)F) rrlrr match_string maxMismatchesrar`r[)rrr)rHrwrxr szCloseMatch.__init__TcCs|}t|}|t|j}||kr|j}d}g} |j} xtt||||jD]0\}} | \} } | | krP| j|t| | krPPqPW|d}t|||g}|j|d<| |d<||fSt|||j|dS)Nrrroriginal mismatches) rrrrrrr"rra)rr-rrostartrmaxlocrZmatch_stringlocrrZs_msrcmatresultsrwrwrxr s("   zCloseMatch.parseImpl)rr)T)rrrrrrrrwrw)rHrxrl s cs8eZdZdZd fdd Zdd d Zfd d ZZS)r/a Token for matching words composed of allowed character sets. Defined with string containing all allowed initial characters, an optional string containing allowed body characters (if omitted, defaults to the initial character set), and an optional minimum, maximum, and/or exact length. The default value for C{min} is 1 (a minimum value < 1 is not valid); the default values for C{max} and C{exact} are 0, meaning no maximum or exact length restriction. An optional C{excludeChars} parameter can list characters that might be found in the input C{bodyChars} string; useful to define a word of all printables except for one or two characters, for instance. L{srange} is useful for defining custom character set strings for defining C{Word} expressions, using range notation from regular expression character sets. A common mistake is to use C{Word} to match a specific literal string, as in C{Word("Address")}. 
Remember that C{Word} uses the string argument to define I{sets} of matchable characters. This expression would match "Add", "AAA", "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an exact literal string, use L{Literal} or L{Keyword}. pyparsing includes helper strings for building Words: - L{alphas} - L{nums} - L{alphanums} - L{hexnums} - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.) - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.) - L{printables} (any non-whitespace character) Example:: # a word composed of digits integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) # a word with a leading capital, and zero or more lowercase capital_word = Word(alphas.upper(), alphas.lower()) # hostnames are alphanumeric, with leading alpha, and '-' hostname = Word(alphas, alphanums+'-') # roman numeral (not a strict parser, accepts invalid mix of characters) roman = Word("IVXLCDM") # any string of non-whitespace characters, except for ',' csv_value = Word(printables, excludeChars=",") NrrrFc stt|jrFdjfdd|D}|rFdjfdd|D}||_t||_|rl||_t||_n||_t||_|dk|_ |dkrt d||_ |dkr||_ nt |_ |dkr||_ ||_ t||_d|j|_d |_||_d |j|jkr|dkr|dkr|dkr|j|jkr8d t|j|_nHt|jdkrfd tj|jt|jf|_nd t|jt|jf|_|jrd|jd|_ytj|j|_Wntk rd|_YnXdS)Nrc3s|]}|kr|VqdS)Nrw)rr) excludeCharsrwrxr7 sz Word.__init__..c3s|]}|kr|VqdS)Nrw)rr)rrwrxr9 srrrzZcannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permittedz Expected Frz[%s]+z%s[%s]*z [%s][%s]*z\b)rr/rr initCharsOrigr initChars bodyCharsOrig bodyChars maxSpecifiedrminLenmaxLenrrrrar` asKeyword_escapeRegexRangeCharsreStringrrdescapecompilerK)rrrminmaxexactrr)rH)rrxr4 sT      0 z Word.__init__Tc CsD|jr<|jj||}|s(t|||j||j}||jfS|||jkrZt|||j||}|d7}t|}|j}||j }t ||}x ||kr|||kr|d7}qWd} |||j krd} |j r||kr|||krd} |j r|dkr||d|ks||kr|||krd} | r4t|||j|||||fS)NrrFTr)rdrrraendgrouprrrrr 
rrr) rr-rrorrrZ bodycharsrZthrowExceptionrwrwrxrj s6    4zWord.parseImplc stytt|jStk r"YnX|jdkrndd}|j|jkr^d||j||jf|_nd||j|_|jS)NcSs$t|dkr|dddS|SdS)Nz...)r)rrwrwrx charsAsStr s z Word.__str__..charsAsStrz W:(%s,%s)zW:(%s))rr/rrKrUrr)rr)rHrwrxr s  z Word.__str__)NrrrrFN)T)rrrrrrrrrwrw)rHrxr/ s.6 #csFeZdZdZeejdZd fdd Zd ddZ fd d Z Z S) r'a Token for matching strings that match a given regular expression. Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. If the given regex contains named groups (defined using C{(?P...)}), these will be preserved as named parse results. Example:: realnum = Regex(r"[+-]?\d+\.\d*") date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)') # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") z[A-Z]rc stt|jt|tr|s,tjdtdd||_||_ yt j |j|j |_ |j|_ Wqt jk rtjd|tddYqXn2t|tjr||_ t||_|_ ||_ ntdt||_d|j|_d|_d|_d S) zThe parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.z0null string passed to Regex; use Empty() insteadrq)rz$invalid pattern (%s) passed to RegexzCRegex may only be constructed with a string or a compiled RE objectz Expected FTN)rr'rrzrrrrpatternflagsrdr r sre_constantserrorcompiledREtyper{rrrrar`r[)rrr)rHrwrxr s.         zRegex.__init__TcCsd|jj||}|s"t|||j||j}|j}t|j}|r\x|D]}||||<qHW||fS)N)rdrrrar groupdictr"r)rr-rrordrrrwrwrxr s  zRegex.parseImplc sDytt|jStk r"YnX|jdkr>dt|j|_|jS)NzRe:(%s))rr'rrKrUrr)r)rHrwrxr s z Regex.__str__)r)T) rrrrrrdr rrrrrrwrw)rHrxr' s  " cs8eZdZdZd fdd Zd ddZfd d ZZS) r%a Token for matching strings that are delimited by quoting characters. 
Defined with the following parameters: - quoteChar - string of one or more characters defining the quote delimiting string - escChar - character to escape quotes, typically backslash (default=C{None}) - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None}) - multiline - boolean indicating whether quotes can span multiple lines (default=C{False}) - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True}) - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar) - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True}) Example:: qs = QuotedString('"') print(qs.searchString('lsjdf "This is the quote" sldjf')) complex_qs = QuotedString('{{', endQuoteChar='}}') print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) sql_qs = QuotedString('"', escQuote='""') print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) prints:: [['This is the quote']] [['This is the "quote"']] [['This is the quote with "embedded" quotes']] NFTc sNttj|j}|s0tjdtddt|dkr>|}n"|j}|s`tjdtddt|_t |_ |d_ |_ t |_ |_|_|_|_|rtjtjB_dtjjtj d|dk rt|pdf_n.)z|(?:%s)z|(?:%s.)z(.)z)*%sz$invalid pattern (%s) passed to Regexz Expected FTrs)%rr%rrrrr SyntaxError quoteCharr quoteCharLenfirstQuoteCharrendQuoteCharLenescCharescQuoteunquoteResultsconvertWhitespaceEscapesrd MULTILINEDOTALLrr rrrrescCharReplacePatternr rrrrrrar`r[)rrr r!Z multiliner"rr#)rH)rrxr sf       6     zQuotedString.__init__c Cs|||jkr|jj||pd}|s4t|||j||j}|j}|jr||j|j }t |t rd|kr|j rddddd}x |j D]\}}|j||}qW|jrtj|jd|}|jr|j|j|j}||fS)N\ r  )z\tz\nz\fz\rz\g<1>)rrdrrrarrr"rrrzrr#rrr rr&r!r) rr-rrorrZws_mapZwslitZwscharrwrwrxrG s(  zQuotedString.parseImplc sFytt|jStk r"YnX|jdkr@d|j|jf|_|jS)Nz.quoted string, starting with %s ending with %s)rr%rrKrUrr)r)rHrwrxrj s 
zQuotedString.__str__)NNFTNT)T)rrrrrrrrrwrw)rHrxr% sA #cs8eZdZdZd fdd Zd ddZfd d ZZS) r a Token for matching words composed of characters I{not} in a given set (will include whitespace in matched characters if not listed in the provided exclusion set - see example). Defined with string containing all disallowed characters, and an optional minimum, maximum, and/or exact length. The default value for C{min} is 1 (a minimum value < 1 is not valid); the default values for C{max} and C{exact} are 0, meaning no maximum or exact length restriction. Example:: # define a comma-separated-value as anything that is not a ',' csv_value = CharsNotIn(',') print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) prints:: ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] rrrcstt|jd|_||_|dkr*td||_|dkr@||_nt|_|dkrZ||_||_t ||_ d|j |_ |jdk|_ d|_ dS)NFrrzfcannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permittedrz Expected )rr rrXnotCharsrrrrrrrar[r`)rr+r r r )rHrwrxr s    zCharsNotIn.__init__TcCs|||jkrt|||j||}|d7}|j}t||jt|}x ||krd|||krd|d7}qFW|||jkrt|||j|||||fS)Nrr)r+rrar rrr)rr-rrorZnotcharsmaxlenrwrwrxr s   zCharsNotIn.parseImplc sdytt|jStk r"YnX|jdkr^t|jdkrRd|jdd|_n d|j|_|jS)Nrz !W:(%s...)z!W:(%s))rr rrKrUrr+)r)rHrwrxr s  zCharsNotIn.__str__)rrrr)T)rrrrrrrrrwrw)rHrxr v s cs<eZdZdZddddddZdfd d ZdddZZS)r.a Special matching class for matching whitespace. Normally, whitespace is ignored by pyparsing grammars. This class is included when some whitespace structures are significant. Define with a string containing the whitespace characters to be matched; default is C{" \t\r\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments, as defined for the C{L{Word}} class. zzzzz)rr(rr*r) rrrcsttj|_jdjfddjDdjddjD_d_dj_ |_ |dkrt|_ nt _ |dkr|_ |_ dS)Nrc3s|]}|jkr|VqdS)N) matchWhite)rr)rrwrxr sz!White.__init__..css|]}tj|VqdS)N)r. 
whiteStrs)rrrwrwrxr sTz Expected r) rr.rr.rrrYrr[rarrr)rZwsr r r )rH)rrxr s  zWhite.__init__TcCs|||jkrt|||j||}|d7}||j}t|t|}x"||krd|||jkrd|d7}qDW|||jkrt|||j|||||fS)Nrr)r.rrarr rr)rr-rrorrrwrwrxr s  zWhite.parseImpl)r-rrrr)T)rrrrr/rrrrwrw)rHrxr. scseZdZfddZZS)_PositionTokencs(tt|j|jj|_d|_d|_dS)NTF)rr0rrHrrr[r`)r)rHrwrxr s z_PositionToken.__init__)rrrrrrwrw)rHrxr0 sr0cs2eZdZdZfddZddZd ddZZS) rzb Token to advance to a specific column of input text; useful for tabular report scraping. cstt|j||_dS)N)rrrr9)rcolno)rHrwrxr szGoToColumn.__init__cCs`t|||jkr\t|}|jr*|j||}x0||krZ||jrZt|||jkrZ|d7}q,W|S)Nrr)r9rr]risspace)rr-rrrwrwrxr s & zGoToColumn.preParseTcCsDt||}||jkr"t||d|||j|}|||}||fS)NzText not in expected column)r9r)rr-rroZthiscolZnewlocrrwrwrxr s    zGoToColumn.parseImpl)T)rrrrrrrrrwrw)rHrxr s  cs*eZdZdZfddZdddZZS)ra Matches if current position is at the beginning of a line within the parse string Example:: test = ''' AAA this line AAA and this line AAA but not this one B AAA and definitely not this one ''' for t in (LineStart() + 'AAA' + restOfLine).searchString(test): print(t) Prints:: ['AAA', ' this line'] ['AAA', ' and this line'] cstt|jd|_dS)NzExpected start of line)rrrra)r)rHrwrxr& szLineStart.__init__TcCs*t||dkr|gfSt|||j|dS)Nrr)r9rra)rr-rrorwrwrxr* szLineStart.parseImpl)T)rrrrrrrrwrw)rHrxr s cs*eZdZdZfddZdddZZS)rzU Matches if current position is at the end of a line within the parse string cs,tt|j|jtjjddd|_dS)NrrzExpected end of line)rrrrr$rNrra)r)rHrwrxr3 szLineEnd.__init__TcCsb|t|kr6||dkr$|ddfSt|||j|n(|t|krN|dgfSt|||j|dS)Nrrr)rrra)rr-rrorwrwrxr8 s     zLineEnd.parseImpl)T)rrrrrrrrwrw)rHrxr/ s cs*eZdZdZfddZdddZZS)r*zM Matches if current position is at the beginning of the parse string cstt|jd|_dS)NzExpected start of text)rr*rra)r)rHrwrxrG szStringStart.__init__TcCs0|dkr(||j|dkr(t|||j||gfS)Nr)rrra)rr-rrorwrwrxrK szStringStart.parseImpl)T)rrrrrrrrwrw)rHrxr*C s cs*eZdZdZfddZdddZZS)r)zG Matches if current position is at the end of the parse string 
cstt|jd|_dS)NzExpected end of text)rr)rra)r)rHrwrxrV szStringEnd.__init__TcCs^|t|krt|||j|n<|t|kr6|dgfS|t|krJ|gfSt|||j|dS)Nrr)rrra)rr-rrorwrwrxrZ s    zStringEnd.parseImpl)T)rrrrrrrrwrw)rHrxr)R s cs.eZdZdZeffdd ZdddZZS)r1ap Matches if the current position is at the beginning of a Word, and is not preceded by any character in a given set of C{wordChars} (default=C{printables}). To emulate the C{} behavior of regular expressions, use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of the string being parsed, or at the beginning of a line. cs"tt|jt||_d|_dS)NzNot at the start of a word)rr1rr wordCharsra)rr3)rHrwrxrl s zWordStart.__init__TcCs@|dkr8||d|jks(|||jkr8t|||j||gfS)Nrrr)r3rra)rr-rrorwrwrxrq s zWordStart.parseImpl)T)rrrrrVrrrrwrw)rHrxr1d scs.eZdZdZeffdd ZdddZZS)r0aZ Matches if the current position is at the end of a Word, and is not followed by any character in a given set of C{wordChars} (default=C{printables}). To emulate the C{} behavior of regular expressions, use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of the string being parsed, or at the end of a line. cs(tt|jt||_d|_d|_dS)NFzNot at the end of a word)rr0rrr3rXra)rr3)rHrwrxr s zWordEnd.__init__TcCsPt|}|dkrH||krH|||jks8||d|jkrHt|||j||gfS)Nrrr)rr3rra)rr-rrorrwrwrxr s zWordEnd.parseImpl)T)rrrrrVrrrrwrw)rHrxr0x scseZdZdZdfdd ZddZddZd d Zfd d Zfd dZ fddZ dfdd Z gfddZ fddZ ZS)r z^ Abstract subclass of ParserElement, for combining and post-processing parsed tokens. 
Fc stt|j|t|tr"t|}t|tr.F)rr rrzrrrr$rQexprsrIterableallrvrre)rr4rg)rHrwrxr s     zParseExpression.__init__cCs |j|S)N)r4)rrrwrwrxr szParseExpression.__getitem__cCs|jj|d|_|S)N)r4rrU)rrrwrwrxr s zParseExpression.appendcCs4d|_dd|jD|_x|jD] }|jq W|S)z~Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on all contained expressions.FcSsg|] }|jqSrw)r)rrrwrwrxr sz3ParseExpression.leaveWhitespace..)rXr4r)rrrwrwrxr s   zParseExpression.leaveWhitespacecszt|trF||jkrvtt|j|xP|jD]}|j|jdq,Wn0tt|j|x|jD]}|j|jdq^W|S)Nrrrsrs)rzr+r]rr rr4)rrr)rHrwrxr s    zParseExpression.ignorec sLytt|jStk r"YnX|jdkrFd|jjt|jf|_|jS)Nz%s:(%s)) rr rrKrUrHrrr4)r)rHrwrxr s zParseExpression.__str__cs0tt|jx|jD] }|jqWt|jdkr|jd}t||jr|j r|jdkr|j r|jdd|jdg|_d|_ |j |j O_ |j |j O_ |jd}t||jo|j o|jdko|j r|jdd|jdd|_d|_ |j |j O_ |j |j O_ dt ||_|S)Nrqrrrz Expected rsrs)rr rr4rrzrHrSrVr^rUr[r`rra)rrr)rHrwrxr s0         zParseExpression.streamlinecstt|j||}|S)N)rr rm)rrrlr)rHrwrxrm szParseExpression.setResultsNamecCs:|dd|g}x|jD]}|j|qW|jgdS)N)r4rr)rrtmprrwrwrxr s zParseExpression.validatecs$tt|j}dd|jD|_|S)NcSsg|] }|jqSrw)r)rrrwrwrxr sz(ParseExpression.copy..)rr rr4)rr)rHrwrxr szParseExpression.copy)F)F)rrrrrrrrrrrrmrrrrwrw)rHrxr s " csTeZdZdZGdddeZdfdd ZdddZd d Zd d Z d dZ Z S)ra  Requires all given C{ParseExpression}s to be found in the given order. Expressions may be separated by whitespace. May be constructed using the C{'+'} operator. May also be constructed using the C{'-'} operator, which will suppress backtracking. 
Example:: integer = Word(nums) name_expr = OneOrMore(Word(alphas)) expr = And([integer("id"),name_expr("name"),integer("age")]) # more easily written as: expr = integer("id") + name_expr("name") + integer("age") cseZdZfddZZS)zAnd._ErrorStopcs&ttj|j||d|_|jdS)N-)rrrrrr)rrr)rHrwrxr szAnd._ErrorStop.__init__)rrrrrrwrw)rHrxr srTcsRtt|j||tdd|jD|_|j|jdj|jdj|_d|_ dS)Ncss|] }|jVqdS)N)r[)rrrwrwrxr szAnd.__init__..rT) rrrr6r4r[rrYrXre)rr4rg)rHrwrxr s z And.__init__c Cs|jdj|||dd\}}d}x|jddD]}t|tjrFd}q0|ry|j|||\}}Wqtk rvYqtk r}zd|_tj|WYdd}~Xqt k rt|t ||j |YqXn|j|||\}}|s|j r0||7}q0W||fS)NrF)rprrT) r4rtrzrrr#r __traceback__rrrrar) rr-rro resultlistZ errorStoprZ exprtokensrrwrwrxr s(   z And.parseImplcCst|trtj|}|j|S)N)rzrr$rQr)rrrwrwrxr5 s  z And.__iadd__cCs8|dd|g}x |jD]}|j||jsPqWdS)N)r4rr[)rrsubRecCheckListrrwrwrxr: s   zAnd.checkRecursioncCs@t|dr|jS|jdkr:ddjdd|jDd|_|jS)Nr{rcss|]}t|VqdS)N)r)rrrwrwrxrF szAnd.__str__..})rrrUrr4)rrwrwrxrA s    z And.__str__)T)T) rrrrr rrrrrrrrwrw)rHrxr s csDeZdZdZdfdd ZdddZdd Zd d Zd d ZZ S)ra Requires that at least one C{ParseExpression} is found. If two expressions match, the expression that matches the longest string will be used. May be constructed using the C{'^'} operator. Example:: # construct Or using '^' operator number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) print(number.searchString("123 3.1416 789")) prints:: [['123'], ['3.1416'], ['789']] Fcs:tt|j|||jr0tdd|jD|_nd|_dS)Ncss|] }|jVqdS)N)r[)rrrwrwrxr\ szOr.__init__..T)rrrr4rr[)rr4rg)rHrwrxrY sz Or.__init__Tc CsTd}d}g}x|jD]}y|j||}Wnvtk rd} z d| _| j|krT| }| j}WYdd} ~ Xqtk rt||krt|t||j|}t|}YqX|j||fqW|r*|j dddx`|D]X\} }y|j |||Stk r$} z"d| _| j|kr| }| j}WYdd} ~ XqXqW|dk rB|j|_ |nt||d|dS)NrrcSs |d S)Nrrw)xrwrwrxryu szOr.parseImpl..)rz no defined alternatives to matchrs) r4rrr9rrrrarsortrtr) rr-rro maxExcLoc maxExceptionrrZloc2r_rwrwrxr` s<     z Or.parseImplcCst|trtj|}|j|S)N)rzrr$rQr)rrrwrwrx__ixor__ s  z Or.__ixor__cCs@t|dr|jS|jdkr:ddjdd|jDd|_|jS)Nrr<z ^ css|]}t|VqdS)N)r)rrrwrwrxr szOr.__str__..r=)rrrUrr4)rrwrwrxr s    z Or.__str__cCs0|dd|g}x|jD]}|j|qWdS)N)r4r)rrr;rrwrwrxr s zOr.checkRecursion)F)T) rrrrrrrCrrrrwrw)rHrxrK s   & csDeZdZdZdfdd ZdddZdd Zd d Zd d ZZ S)ra Requires that at least one C{ParseExpression} is found. If two expressions match, the first one listed is the one that will match. May be constructed using the C{'|'} operator. Example:: # construct MatchFirst using '|' operator # watch the order of expressions to match number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] # put more selective expression first number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] Fcs:tt|j|||jr0tdd|jD|_nd|_dS)Ncss|] }|jVqdS)N)r[)rrrwrwrxr sz&MatchFirst.__init__..T)rrrr4rr[)rr4rg)rHrwrxr szMatchFirst.__init__Tc Csd}d}x|jD]}y|j|||}|Stk r\}z|j|krL|}|j}WYdd}~Xqtk rt||krt|t||j|}t|}YqXqW|dk r|j|_|nt||d|dS)Nrrz no defined alternatives to matchrs)r4rtrrrrrar) rr-rror@rArrrrwrwrxr s$   zMatchFirst.parseImplcCst|trtj|}|j|S)N)rzrr$rQr)rrrwrwrx__ior__ s  zMatchFirst.__ior__cCs@t|dr|jS|jdkr:ddjdd|jDd|_|jS)Nrr<z | css|]}t|VqdS)N)r)rrrwrwrxr sz%MatchFirst.__str__..r=)rrrUrr4)rrwrwrxr s    zMatchFirst.__str__cCs0|dd|g}x|jD]}|j|qWdS)N)r4r)rrr;rrwrwrxr s zMatchFirst.checkRecursion)F)T) rrrrrrrDrrrrwrw)rHrxr s   cs<eZdZdZd fdd Zd ddZddZd d ZZS) r am Requires all given C{ParseExpression}s to be found, but in any order. Expressions may be separated by whitespace. May be constructed using the C{'&'} operator. Example:: color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") integer = Word(nums) shape_attr = "shape:" + shape_type("shape") posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") color_attr = "color:" + color("color") size_attr = "size:" + integer("size") # use Each (using operator '&') to accept attributes in any order # (shape and posn are required, color and size are optional) shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) shape_spec.runTests(''' shape: SQUARE color: BLACK posn: 100, 120 shape: CIRCLE size: 50 color: BLUE posn: 50,80 color:GREEN size:20 shape:TRIANGLE posn:20,40 ''' ) prints:: shape: SQUARE color: BLACK posn: 100, 120 ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - color: BLACK - posn: ['100', ',', '120'] - x: 100 - y: 120 - shape: SQUARE shape: CIRCLE size: 50 color: BLUE posn: 50,80 ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - 
color: BLUE - posn: ['50', ',', '80'] - x: 50 - y: 80 - shape: CIRCLE - size: 50 color: GREEN size: 20 shape: TRIANGLE posn: 20,40 ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - color: GREEN - posn: ['20', ',', '40'] - x: 20 - y: 40 - shape: TRIANGLE - size: 20 Tcs8tt|j||tdd|jD|_d|_d|_dS)Ncss|] }|jVqdS)N)r[)rrrwrwrxrsz Each.__init__..T)rr rr6r4r[rXinitExprGroups)rr4rg)rHrwrxrsz Each.__init__c s|jrtdd|jD|_dd|jD}dd|jD}|||_dd|jD|_dd|jD|_dd|jD|_|j|j7_d |_|}|jdd}|jddg}d } x| rp||j|j} g} x~| D]v} y| j||}Wn t k r| j | YqX|j |jj t | | | |krD|j | q| krj | qWt| t| krd } qW|rd jd d|D} t ||d | |fdd|jD7}g}x*|D]"} | j|||\}}|j |qWt|tg}||fS)Ncss&|]}t|trt|j|fVqdS)N)rzrrr.)rrrwrwrxrsz!Each.parseImpl..cSsg|]}t|tr|jqSrw)rzrr.)rrrwrwrxrsz"Each.parseImpl..cSs"g|]}|jrt|t r|qSrw)r[rzr)rrrwrwrxrscSsg|]}t|tr|jqSrw)rzr2r.)rrrwrwrxr scSsg|]}t|tr|jqSrw)rzrr.)rrrwrwrxr!scSs g|]}t|tttfs|qSrw)rzrr2r)rrrwrwrxr"sFTz, css|]}t|VqdS)N)r)rrrwrwrxr=sz*Missing one or more required elements (%s)cs$g|]}t|tr|jkr|qSrw)rzrr.)rr)tmpOptrwrxrAs)rErr4Zopt1mapZ optionalsZmultioptionalsZ multirequiredZrequiredrrrrrremoverrrtsumr")rr-rroZopt1Zopt2ZtmpLocZtmpReqdZ matchOrderZ keepMatchingZtmpExprsZfailedrZmissingr:rZ finalResultsrw)rFrxrsP     zEach.parseImplcCs@t|dr|jS|jdkr:ddjdd|jDd|_|jS)Nrr<z & css|]}t|VqdS)N)r)rrrwrwrxrPszEach.__str__..r=)rrrUrr4)rrwrwrxrKs    z Each.__str__cCs0|dd|g}x|jD]}|j|qWdS)N)r4r)rrr;rrwrwrxrTs zEach.checkRecursion)T)T) rrrrrrrrrrwrw)rHrxr s 5 1 csleZdZdZdfdd ZdddZdd Zfd d Zfd d ZddZ gfddZ fddZ Z S)rza Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens. 
Fcstt|j|t|tr@ttjtr2tj|}ntjt |}||_ d|_ |dk r|j |_ |j |_ |j|j|j|_|j|_|j|_|jj|jdS)N)rrrrzr issubclassr$rQr,rr.rUr`r[rrYrXrWrer]r)rr.rg)rHrwrxr^s    zParseElementEnhance.__init__TcCs2|jdk r|jj|||ddStd||j|dS)NF)rpr)r.rtrra)rr-rrorwrwrxrps zParseElementEnhance.parseImplcCs*d|_|jj|_|jdk r&|jj|S)NF)rXr.rr)rrwrwrxrvs    z#ParseElementEnhance.leaveWhitespacecsrt|trB||jkrntt|j||jdk rn|jj|jdn,tt|j||jdk rn|jj|jd|S)Nrrrsrs)rzr+r]rrrr.)rr)rHrwrxr}s    zParseElementEnhance.ignorecs&tt|j|jdk r"|jj|S)N)rrrr.)r)rHrwrxrs  zParseElementEnhance.streamlinecCsB||krt||g|dd|g}|jdk r>|jj|dS)N)r&r.r)rrr;rwrwrxrs  z"ParseElementEnhance.checkRecursioncCs6|dd|g}|jdk r(|jj||jgdS)N)r.rr)rrr7rwrwrxrs  zParseElementEnhance.validatec sVytt|jStk r"YnX|jdkrP|jdk rPd|jjt|jf|_|jS)Nz%s:(%s)) rrrrKrUr.rHrr)r)rHrwrxrszParseElementEnhance.__str__)F)T) rrrrrrrrrrrrrrwrw)rHrxrZs   cs*eZdZdZfddZdddZZS)ra Lookahead matching of the given parse expression. C{FollowedBy} does I{not} advance the parsing position within the input string, it only verifies that the specified parse expression matches at the current position. C{FollowedBy} always returns a null token list. Example:: # use FollowedBy to match a label only if it is followed by a ':' data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() prints:: [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] cstt|j|d|_dS)NT)rrrr[)rr.)rHrwrxrszFollowedBy.__init__TcCs|jj|||gfS)N)r.r)rr-rrorwrwrxrszFollowedBy.parseImpl)T)rrrrrrrrwrw)rHrxrs cs2eZdZdZfddZd ddZddZZS) ra Lookahead to disallow matching with the given parse expression. C{NotAny} does I{not} advance the parsing position within the input string, it only verifies that the specified parse expression does I{not} match at the current position. Also, C{NotAny} does I{not} skip over leading whitespace. 
C{NotAny} always returns a null token list. May be constructed using the '~' operator. Example:: cs0tt|j|d|_d|_dt|j|_dS)NFTzFound unwanted token, )rrrrXr[rr.ra)rr.)rHrwrxrszNotAny.__init__TcCs&|jj||rt|||j||gfS)N)r.rrra)rr-rrorwrwrxrszNotAny.parseImplcCs4t|dr|jS|jdkr.dt|jd|_|jS)Nrz~{r=)rrrUrr.)rrwrwrxrs   zNotAny.__str__)T)rrrrrrrrrwrw)rHrxrs   cs(eZdZdfdd ZdddZZS) _MultipleMatchNcsFtt|j|d|_|}t|tr.tj|}|dk r<|nd|_dS)NT) rrJrrWrzrr$rQ not_ender)rr.stopOnZender)rHrwrxrs   z_MultipleMatch.__init__Tc Cs|jj}|j}|jdk }|r$|jj}|r2|||||||dd\}}yZ|j } xJ|rb|||| rr|||} n|} ||| |\}} | s| jrT|| 7}qTWWnttfk rYnX||fS)NF)rp) r.rtrrKrr]rrr) rr-rroZself_expr_parseZself_skip_ignorablesZ check_enderZ try_not_enderrZhasIgnoreExprsrZ tmptokensrwrwrxrs,      z_MultipleMatch.parseImpl)N)T)rrrrrrrwrw)rHrxrJsrJc@seZdZdZddZdS)ra Repetition of one or more of the given expression. Parameters: - expr - expression that must match one or more times - stopOn - (default=C{None}) - expression for a terminating sentinel (only required if the sentinel would ordinarily match the repetition expression) Example:: data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) text = "shape: SQUARE posn: upper left color: BLACK" OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] # use stopOn attribute for OneOrMore to avoid reading label string as part of the data attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] # could also be written as (attr_expr * (1,)).parseString(text).pprint() cCs4t|dr|jS|jdkr.dt|jd|_|jS)Nrr<z}...)rrrUrr.)rrwrwrxr!s   zOneOrMore.__str__N)rrrrrrwrwrwrxrscs8eZdZdZd fdd Zd fdd Zdd ZZS) r2aw Optional repetition of zero or more of the given expression. 
Parameters: - expr - expression that must match zero or more times - stopOn - (default=C{None}) - expression for a terminating sentinel (only required if the sentinel would ordinarily match the repetition expression) Example: similar to L{OneOrMore} Ncstt|j||dd|_dS)N)rLT)rr2rr[)rr.rL)rHrwrxr6szZeroOrMore.__init__Tc s6ytt|j|||Sttfk r0|gfSXdS)N)rr2rrr)rr-rro)rHrwrxr:szZeroOrMore.parseImplcCs4t|dr|jS|jdkr.dt|jd|_|jS)Nrrz]...)rrrUrr.)rrwrwrxr@s   zZeroOrMore.__str__)N)T)rrrrrrrrrwrw)rHrxr2*s c@s eZdZddZeZddZdS) _NullTokencCsdS)NFrw)rrwrwrxrJsz_NullToken.__bool__cCsdS)Nrrw)rrwrwrxrMsz_NullToken.__str__N)rrrrr'rrwrwrwrxrMIsrMcs6eZdZdZeffdd Zd ddZddZZS) raa Optional matching of the given expression. Parameters: - expr - expression that must match zero or more times - default (optional) - value to be returned if the optional expression is not found. Example:: # US postal code can be a 5-digit zip, plus optional 4-digit qualifier zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) zip.runTests(''' # traditional ZIP code 12345 # ZIP+4 form 12101-0001 # invalid ZIP 98765- ''') prints:: # traditional ZIP code 12345 ['12345'] # ZIP+4 form 12101-0001 ['12101-0001'] # invalid ZIP 98765- ^ FAIL: Expected end of text (at char 5), (line:1, col:6) cs.tt|j|dd|jj|_||_d|_dS)NF)rgT)rrrr.rWrr[)rr.r)rHrwrxrts zOptional.__init__Tc Cszy|jj|||dd\}}WnTttfk rp|jtk rh|jjr^t|jg}|j||jj<ql|jg}ng}YnX||fS)NF)rp)r.rtrrr_optionalNotMatchedrVr")rr-rrorrwrwrxrzs    zOptional.parseImplcCs4t|dr|jS|jdkr.dt|jd|_|jS)Nrrr )rrrUrr.)rrwrwrxrs   zOptional.__str__)T) rrrrrNrrrrrwrw)rHrxrQs" cs,eZdZdZd fdd Zd ddZZS) r(a Token for skipping over all undefined text until the matched expression is found. Parameters: - expr - target expression marking the end of the data to be skipped - include - (default=C{False}) if True, the target expression is also parsed (the skipped text and target expression are returned as a 2-element list). 
- ignore - (default=C{None}) used to define grammars (typically quoted strings and comments) that might contain false matches to the target expression - failOn - (default=C{None}) define expressions that are not allowed to be included in the skipped test; if found before the target expression is found, the SkipTo is not a match Example:: report = ''' Outstanding Issues Report - 1 Jan 2000 # | Severity | Description | Days Open -----+----------+-------------------------------------------+----------- 101 | Critical | Intermittent system crash | 6 94 | Cosmetic | Spelling error on Login ('log|n') | 14 79 | Minor | System slow when running too many reports | 47 ''' integer = Word(nums) SEP = Suppress('|') # use SkipTo to simply match everything up until the next SEP # - ignore quoted strings, so that a '|' character inside a quoted string does not match # - parse action will call token.strip() for each matched token, i.e., the description body string_data = SkipTo(SEP, ignore=quotedString) string_data.setParseAction(tokenMap(str.strip)) ticket_expr = (integer("issue_num") + SEP + string_data("sev") + SEP + string_data("desc") + SEP + integer("days_open")) for tkt in ticket_expr.searchString(report): print tkt.dump() prints:: ['101', 'Critical', 'Intermittent system crash', '6'] - days_open: 6 - desc: Intermittent system crash - issue_num: 101 - sev: Critical ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - days_open: 14 - desc: Spelling error on Login ('log|n') - issue_num: 94 - sev: Cosmetic ['79', 'Minor', 'System slow when running too many reports', '47'] - days_open: 47 - desc: System slow when running too many reports - issue_num: 79 - sev: Minor FNcs`tt|j|||_d|_d|_||_d|_t|t rFt j ||_ n||_ dt |j|_dS)NTFzNo match found for )rr(r ignoreExprr[r` includeMatchrrzrr$rQfailOnrr.ra)rrincluderrQ)rHrwrxrs zSkipTo.__init__Tc Cs,|}t|}|j}|jj}|jdk r,|jjnd}|jdk rB|jjnd} |} x| |kr|dk rh||| rhP| dk rx*y| || } Wqrtk rPYqrXqrWy||| dddWn tt fk r| d7} YqLXPqLWt|||j || 
}|||} t | } |j r$||||dd\}} | | 7} || fS)NF)rorprr)rp) rr.rtrQrrOrrrrrar"rP)rr-rror0rr.Z expr_parseZself_failOn_canParseNextZself_ignoreExpr_tryParseZtmplocZskiptextZ skipresultrrwrwrxrs<    zSkipTo.parseImpl)FNN)T)rrrrrrrrwrw)rHrxr(s6 csbeZdZdZdfdd ZddZddZd d Zd d Zgfd dZ ddZ fddZ Z S)raK Forward declaration of an expression to be defined later - used for recursive grammars, such as algebraic infix notation. When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. Note: take care when assigning to C{Forward} not to overlook precedence of operators. Specifically, '|' has a lower precedence than '<<', so that:: fwdExpr << a | b | c will actually be evaluated as:: (fwdExpr << a) | b | c thereby leaving b and c out as parseable alternatives. It is recommended that you explicitly group the values inserted into the C{Forward}:: fwdExpr << (a | b | c) Converting to use the '<<=' operator instead will avoid this problem. See L{ParseResults.pprint} for an example of a recursive parser created using C{Forward}. Ncstt|j|dddS)NF)rg)rrr)rr)rHrwrxrszForward.__init__cCsjt|trtj|}||_d|_|jj|_|jj|_|j|jj |jj |_ |jj |_ |j j |jj |S)N)rzrr$rQr.rUr`r[rrYrXrWr]r)rrrwrwrx __lshift__s      zForward.__lshift__cCs||>S)Nrw)rrrwrwrx __ilshift__'szForward.__ilshift__cCs d|_|S)NF)rX)rrwrwrxr*szForward.leaveWhitespacecCs$|js d|_|jdk r |jj|S)NT)r_r.r)rrwrwrxr.s   zForward.streamlinecCs>||kr0|dd|g}|jdk r0|jj||jgdS)N)r.rr)rrr7rwrwrxr5s   zForward.validatec Cs>t|dr|jS|jjdSd}Wd|j|_X|jjd|S)Nrz: ...Nonez: )rrrHrZ _revertClass_ForwardNoRecurser.r)rZ retStringrwrwrxr<s   zForward.__str__cs.|jdk rtt|jSt}||K}|SdS)N)r.rrr)rr)rHrwrxrMs  z Forward.copy)N) rrrrrrSrTrrrrrrrwrw)rHrxrs  c@seZdZddZdS)rVcCsdS)Nz...rw)rrwrwrxrVsz_ForwardNoRecurse.__str__N)rrrrrwrwrwrxrVUsrVcs"eZdZdZdfdd ZZS)r-zQ Abstract subclass of C{ParseExpression}, for converting parsed results. 
Fcstt|j|d|_dS)NF)rr-rrW)rr.rg)rHrwrxr]szTokenConverter.__init__)F)rrrrrrrwrw)rHrxr-Yscs6eZdZdZd fdd ZfddZdd ZZS) r a Converter to concatenate all matching tokens to a single string. By default, the matching patterns must also be contiguous in the input string; this can be disabled by specifying C{'adjacent=False'} in the constructor. Example:: real = Word(nums) + '.' + Word(nums) print(real.parseString('3.1416')) # -> ['3', '.', '1416'] # will also erroneously match the following print(real.parseString('3. 1416')) # -> ['3', '.', '1416'] real = Combine(Word(nums) + '.' + Word(nums)) print(real.parseString('3.1416')) # -> ['3.1416'] # no match when there are internal spaces print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) rTcs8tt|j||r|j||_d|_||_d|_dS)NT)rr rradjacentrX joinStringre)rr.rXrW)rHrwrxrrszCombine.__init__cs(|jrtj||ntt|j||S)N)rWr$rrr )rr)rHrwrxr|szCombine.ignorecCsP|j}|dd=|tdj|j|jg|jd7}|jrH|jrH|gS|SdS)Nr)r)rr"rr rXrbrVr)rr-rrZretToksrwrwrxrs  "zCombine.postParse)rT)rrrrrrrrrwrw)rHrxr as cs(eZdZdZfddZddZZS)ra Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions. Example:: ident = Word(alphas) num = Word(nums) term = ident | num func = ident + Optional(delimitedList(term)) print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100'] func = ident + Group(Optional(delimitedList(term))) print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']] cstt|j|d|_dS)NT)rrrrW)rr.)rHrwrxrszGroup.__init__cCs|gS)Nrw)rr-rrrwrwrxrszGroup.postParse)rrrrrrrrwrw)rHrxrs  cs(eZdZdZfddZddZZS)r aW Converter to return a repetitive expression as a list, but also as a dictionary. Each element can also be referenced using the first token in the expression as its key. Useful for tabular report scraping when the first column can be used as a item key. 
Example:: data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) text = "shape: SQUARE posn: upper left color: light blue texture: burlap" attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) # print attributes as plain groups print(OneOrMore(attr_expr).parseString(text).dump()) # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names result = Dict(OneOrMore(Group(attr_expr))).parseString(text) print(result.dump()) # access named fields as dict entries, or output as dict print(result['shape']) print(result.asDict()) prints:: ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - color: light blue - posn: upper left - shape: SQUARE - texture: burlap SQUARE {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} See more examples at L{ParseResults} of accessing fields by results name. cstt|j|d|_dS)NT)rr rrW)rr.)rHrwrxrsz Dict.__init__cCsxt|D]\}}t|dkr q |d}t|trBt|dj}t|dkr^td|||<q t|dkrt|dt rt|d|||<q |j}|d=t|dkst|tr|j rt||||<q t|d|||<q W|j r|gS|SdS)Nrrrrrq) rrrzrurrrr"rrrV)rr-rrrtokZikeyZ dictvaluerwrwrxrs$   zDict.postParse)rrrrrrrrwrw)rHrxr s# c@s eZdZdZddZddZdS)r+aV Converter for ignoring the results of a parsed expression. Example:: source = "a, b, c,d" wd = Word(alphas) wd_list1 = wd + ZeroOrMore(',' + wd) print(wd_list1.parseString(source)) # often, delimiters that are useful during parsing are just in the # way afterward - use Suppress to keep them out of the parsed output wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) print(wd_list2.parseString(source)) prints:: ['a', ',', 'b', ',', 'c', ',', 'd'] ['a', 'b', 'c', 'd'] (See also L{delimitedList}.) 
cCsgS)Nrw)rr-rrrwrwrxrszSuppress.postParsecCs|S)Nrw)rrwrwrxrszSuppress.suppressN)rrrrrrrwrwrwrxr+sc@s(eZdZdZddZddZddZdS) rzI Wrapper for parse actions, to ensure they are only called once. cCst||_d|_dS)NF)rMcallablecalled)rZ methodCallrwrwrxrs zOnlyOnce.__init__cCs.|js|j|||}d|_|St||ddS)NTr)r[rZr)rrr5rvrrwrwrxrs zOnlyOnce.__call__cCs d|_dS)NF)r[)rrwrwrxreset szOnlyOnce.resetN)rrrrrrr\rwrwrwrxrsc s:tfdd}y j|_Wntk r4YnX|S)as Decorator for debugging parse actions. When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised. Example:: wd = Word(alphas) @traceParseAction def remove_duplicate_chars(tokens): return ''.join(sorted(set(''.join(tokens))) wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) print(wds.parseString("slkdjs sld sldd sdlf sdljf")) prints:: >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) <>entering %s(line: '%s', %d, %r) z<.z)rMrr)rr`rw)rrxrb s  ,FcCs`t|dt|dt|d}|rBt|t||j|S|tt||j|SdS)a Helper to define a delimited list of expressions - the delimiter defaults to ','. By default, the list elements and delimiters can have intervening whitespace, and comments, but this can be overridden by passing C{combine=True} in the constructor. If C{combine} is set to C{True}, the matching tokens are returned as a single token string, with the delimiters included; otherwise, the matching tokens are returned as a list of tokens, with the delimiters suppressed. 
Example:: delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] z [rz]...N)rr r2rir+)r.ZdelimcombineZdlNamerwrwrxr@9s $csjtfdd}|dkr0ttjdd}n|j}|jd|j|dd|jd td S) a: Helper to define a counted list of expressions. This helper defines a pattern of the form:: integer expr expr expr... where the leading integer tells how many expr expressions follow. The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value. Example:: countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] # in this parser, the leading integer value is given in binary, # '10' indicating that 2 values are in the array binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] cs.|d}|r ttg|p&tt>gS)Nr)rrrC)rr5rvr) arrayExprr.rwrxcountFieldParseAction_s"z+countedArray..countFieldParseActionNcSs t|dS)Nr)ru)rvrwrwrxrydszcountedArray..ZarrayLenT)rfz(len) z...)rr/rRrrrirxr)r.ZintExprrdrw)rcr.rxr<Ls cCs:g}x0|D](}t|tr(|jt|q |j|q W|S)N)rzrrrr)Lrrrwrwrxrks   rcs6tfdd}|j|ddjdt|S)a* Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousLiteral(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches a previous literal, will also match the leading C{"1:1"} in C{"1:10"}. If this is not desired, use C{matchPreviousExpr}. Do I{not} use with packrat parsing enabled. 
csP|rBt|dkr|d>qLt|j}tdd|D>n t>dS)Nrrrcss|]}t|VqdS)N)r)rttrwrwrxrszDmatchPreviousLiteral..copyTokenToRepeater..)rrrrr )rr5rvZtflat)reprwrxcopyTokenToRepeaters   z1matchPreviousLiteral..copyTokenToRepeaterT)rfz(prev) )rrxrir)r.rhrw)rgrxrOts  csFt|j}|Kfdd}|j|ddjdt|S)aS Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousExpr(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches by expressions, will I{not} match the leading C{"1:1"} in C{"1:10"}; the expressions are evaluated first, and then compared, so C{"1"} is compared with C{"10"}. Do I{not} use with packrat parsing enabled. cs*t|jfdd}j|dddS)Ncs$t|j}|kr tddddS)Nrr)rrr)rr5rvZ theseTokens) matchTokensrwrxmustMatchTheseTokenss zLmatchPreviousExpr..copyTokenToRepeater..mustMatchTheseTokensT)rf)rrr)rr5rvrj)rg)rirxrhs  z.matchPreviousExpr..copyTokenToRepeaterT)rfz(prev) )rrrxrir)r.Ze2rhrw)rgrxrNs cCs>xdD]}|j|t|}qW|jdd}|jdd}t|S)Nz\^-]rz\nr(z\t)r_bslashr)rrrwrwrxrs    rTc s|rdd}dd}tndd}dd}tg}t|trF|j}n&t|tjr\t|}ntj dt dd|svt Sd }x|t |d kr||}xnt ||d d D]N\}} || |r|||d =Pq||| r|||d =|j|| | }PqW|d 7}q|W| r|ryht |t d j|krZtd d jdd|Djdj|Stdjdd|Djdj|SWn&tk rtj dt ddYnXtfdd|Djdj|S)a Helper to quickly define a set of alternative Literals, and makes sure to do longest-first testing when there is a conflict, regardless of the input order, but returns a C{L{MatchFirst}} for best performance. 
Parameters: - strs - a string of space-delimited literals, or a collection of string literals - caseless - (default=C{False}) - treat all literals as caseless - useRegex - (default=C{True}) - as an optimization, will generate a Regex object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or if creating a C{Regex} raises an exception) Example:: comp_oper = oneOf("< = > <= >= !=") var = Word(alphas) number = Word(nums) term = var | number comparison_expr = term + comp_oper + term print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) prints:: [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] cSs|j|jkS)N)r)rbrwrwrxryszoneOf..cSs|jj|jS)N)rr)rrlrwrwrxryscSs||kS)Nrw)rrlrwrwrxryscSs |j|S)N)r)rrlrwrwrxrysz6Invalid argument to oneOf, expected string or iterablerq)rrrrNrz[%s]css|]}t|VqdS)N)r)rsymrwrwrxrszoneOf..z | |css|]}tj|VqdS)N)rdr )rrmrwrwrxrsz7Exception creating Regex for oneOf, building MatchFirstc3s|]}|VqdS)Nrw)rrm)parseElementClassrwrxrs)rrrzrrrr5rrrrrrrrrr'rirKr) ZstrsrZuseRegexZisequalZmasksZsymbolsrZcurrrrw)rorxrSsL         ((cCsttt||S)a Helper to easily and clearly define a dictionary by specifying the respective patterns for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens in the proper order. The key pattern can include delimiting markers or punctuation, as long as they are suppressed, thereby leaving the significant key text. The value pattern can include named results, so that the C{Dict} results can include named token fields. 
Example:: text = "shape: SQUARE posn: upper left color: light blue texture: burlap" attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) print(OneOrMore(attr_expr).parseString(text).dump()) attr_label = label attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) # similar to Dict, but simpler call format result = dictOf(attr_label, attr_value).parseString(text) print(result.dump()) print(result['shape']) print(result.shape) # object attribute access works too print(result.asDict()) prints:: [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - color: light blue - posn: upper left - shape: SQUARE - texture: burlap SQUARE SQUARE {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} )r r2r)rrrwrwrxrAs!cCs^tjdd}|j}d|_|d||d}|r@dd}ndd}|j||j|_|S) a Helper to return the original, untokenized text for a given expression. Useful to restore the parsed fields of an HTML start tag into the raw tag text itself, or to revert separate tokens with intervening whitespace back to the original matching input text. By default, returns astring containing the original parsed text. If the optional C{asString} argument is passed as C{False}, then the return value is a C{L{ParseResults}} containing any results names that were originally matched, and a single token containing the original matched text from the input string. So if the expression passed to C{L{originalTextFor}} contains expressions with defined results names, you must set C{asString} to C{False} if you want to preserve those results name values. 
Example:: src = "this is test bold text normal text " for tag in ("b","i"): opener,closer = makeHTMLTags(tag) patt = originalTextFor(opener + SkipTo(closer) + closer) print(patt.searchString(src)[0]) prints:: [' bold text '] ['text'] cSs|S)Nrw)rrrvrwrwrxry8sz!originalTextFor..F_original_start _original_endcSs||j|jS)N)rprq)rr5rvrwrwrxry=scSs&||jd|jdg|dd<dS)Nrprq)r)rr5rvrwrwrx extractText?sz$originalTextFor..extractText)r rrrer])r.ZasStringZ locMarkerZ endlocMarker matchExprrrrwrwrxrg s  cCst|jddS)zp Helper to undo pyparsing's default grouping of And expressions, even if all but one are non-empty. cSs|dS)Nrrw)rvrwrwrxryJszungroup..)r-r)r.rwrwrxrhEscCs4tjdd}t|d|d|jjdS)a Helper to decorate a returned token with its starting and ending locations in the input string. This helper adds the following results names: - locn_start = location where matched expression begins - locn_end = location where matched expression ends - value = the actual parsed results Be careful if the input text contains C{} characters, you may want to call C{L{ParserElement.parseWithTabs}} Example:: wd = Word(alphas) for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): print(match) prints:: [[0, 'ljsdf', 5]] [[8, 'lksdjjf', 15]] [[18, 'lkkjj', 23]] cSs|S)Nrw)rr5rvrwrwrxry`szlocatedExpr..Z locn_startrZlocn_end)r rrrr)r.ZlocatorrwrwrxrjLsz\[]-*.$+^?()~ )r cCs |ddS)Nrrrrw)rr5rvrwrwrxryksryz\\0?[xX][0-9a-fA-F]+cCstt|djddS)Nrz\0x)unichrrulstrip)rr5rvrwrwrxrylsz \\0[0-7]+cCstt|ddddS)Nrrr)ruru)rr5rvrwrwrxrymsz\])rr z\wr8rrZnegatebodyr c sBddy djfddtj|jDStk r<dSXdS)a Helper to easily define string ranges for use in Word construction. Borrows syntax from regexp '[]' string range definitions:: srange("[0-9]") -> "0123456789" srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" The input string must be enclosed in []'s, and the returned string is the expanded character set joined into a single string. 
The values enclosed in the []'s may be: - a single character - an escaped character with a leading backslash (such as C{\-} or C{\]}) - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) (C{\0x##} is also supported for backwards compatibility) - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character) - a range of any of the above, separated by a dash (C{'a-z'}, etc.) - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.) cSs<t|ts|Sdjddtt|dt|ddDS)Nrcss|]}t|VqdS)N)ru)rrrwrwrxrsz+srange....rrr)rzr"rrord)prwrwrxryszsrange..rc3s|]}|VqdS)Nrw)rpart) _expandedrwrxrszsrange..N)r_reBracketExprrrxrK)rrw)r|rxr_rs  csfdd}|S)zt Helper method for defining parse actions that require matching at a specific column in the input text. cs"t||krt||ddS)Nzmatched token not at column %d)r9r)r)Zlocnr1)rrwrx verifyColsz!matchOnlyAtCol..verifyColrw)rr~rw)rrxrMs cs fddS)a Helper method for common parse actions that simply return a literal value. Especially useful when used with C{L{transformString}()}. Example:: num = Word(nums).setParseAction(lambda toks: int(toks[0])) na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) term = na | num OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] csgS)Nrw)rr5rv)replStrrwrxryszreplaceWith..rw)rrw)rrxr\s cCs|dddS)a Helper parse action for removing quotation marks from parsed quoted strings. 
Example:: # by default, quotation marks are included in parsed results quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] # use removeQuotes to strip quotation marks from parsed results quotedString.setParseAction(removeQuotes) quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] rrrrsrw)rr5rvrwrwrxrZs c sNfdd}ytdtdj}Wntk rBt}YnX||_|S)aG Helper to define a parse action by mapping a function to all elements of a ParseResults list.If any additional args are passed, they are forwarded to the given function as additional arguments after the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the parsed data to an integer using base 16. Example (compare the last to example in L{ParserElement.transformString}:: hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) hex_ints.runTests(''' 00 11 22 aa FF 0a 0d 1a ''') upperword = Word(alphas).setParseAction(tokenMap(str.upper)) OneOrMore(upperword).runTests(''' my kingdom for a horse ''') wd = Word(alphas).setParseAction(tokenMap(str.title)) OneOrMore(wd).setParseAction(' '.join).runTests(''' now is the winter of our discontent made glorious summer by this sun of york ''') prints:: 00 11 22 aa FF 0a 0d 1a [0, 17, 34, 170, 255, 10, 13, 26] my kingdom for a horse ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] now is the winter of our discontent made glorious summer by this sun of york ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] csfdd|DS)Ncsg|]}|fqSrwrw)rZtokn)rr6rwrxrsz(tokenMap..pa..rw)rr5rv)rr6rwrxr}sztokenMap..parrH)rJrrKr{)r6rr}rLrw)rr6rxrms cCs t|jS)N)rr)rvrwrwrxryscCs t|jS)N)rlower)rvrwrwrxryscCst|tr|}t|| d}n|j}tttd}|rtjj t }t d|dt t t|t d|tddgdjd j d d t d }nd jddtD}tjj t t|B}t d|dt t t|j ttt d|tddgdjd j dd t d }ttd|d }|jdd j|jddjjjd|}|jdd j|jddjjjd|}||_||_||fS)zRInternal helper to construct opening and closing 
tag expressions, given a tag name)rz_-:rtag=/F)rrCcSs |ddkS)Nrrrw)rr5rvrwrwrxrysz_makeTags..rrcss|]}|dkr|VqdS)rNrw)rrrwrwrxrsz_makeTags..cSs |ddkS)Nrrrw)rr5rvrwrwrxryszrz)rzrrrr/r4r3r>rrrZr+r r2rrrmrrVrYrBr _Lrtitlerrir)tagStrZxmlZresnameZ tagAttrNameZ tagAttrValueZopenTagZprintablesLessRAbrackZcloseTagrwrwrx _makeTagss" T\..rcCs t|dS)a  Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values. Example:: text = 'More info at the pyparsing wiki page' # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple a,a_end = makeHTMLTags("A") link_expr = a + SkipTo(a_end)("link_text") + a_end for link in link_expr.searchString(text): # attributes in the tag (like "href" shown here) are also accessible as named results print(link.link_text, '->', link.href) prints:: pyparsing -> http://pyparsing.wikispaces.com F)r)rrwrwrxrKscCs t|dS)z Helper to construct opening and closing tag expressions for XML, given a tag name. Matches tags only in the given upper/lower case. Example: similar to L{makeHTMLTags} T)r)rrwrwrxrLscs8|r|ddn|jddDfdd}|S)a< Helper to create a validating parse action to be used with start tags created with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag with a required attribute value, to avoid false matches on common tags such as C{} or C{
}. Call C{withAttribute} with a series of attribute names and values. Specify the list of filter attributes names and values as: - keyword arguments, as in C{(align="right")}, or - as an explicit dict with C{**} operator, when an attribute name is also a Python reserved word, as in C{**{"class":"Customer", "align":"right"}} - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) For attribute names with a namespace prefix, you must use the second form. Attribute names are matched insensitive to upper/lower case. If just testing for C{class} (with or without a namespace), use C{L{withClass}}. To verify that the attribute exists, but without specifying a value, pass C{withAttribute.ANY_VALUE} as the value. Example:: html = '''
Some text
1 4 0 1 0
1,3 2,3 1,1
this has no type
''' div,div_end = makeHTMLTags("div") # only match div tag having a type attribute with value "grid" div_grid = div().setParseAction(withAttribute(type="grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) # construct a match with any div tag having a type attribute, regardless of the value div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 NcSsg|]\}}||fqSrwrw)rrrrwrwrxrQsz!withAttribute..cs^xXD]P\}}||kr&t||d||tjkr|||krt||d||||fqWdS)Nzno matching attribute z+attribute '%s' has value '%s', must be '%s')rre ANY_VALUE)rr5rZattrNameZ attrValue)attrsrwrxr}Rs zwithAttribute..pa)r)rZattrDictr}rw)rrxres 2 cCs|r d|nd}tf||iS)a Simplified version of C{L{withAttribute}} when matching on a div class - made difficult because C{class} is a reserved word in Python. Example:: html = '''
Some text
1 4 0 1 0
1,3 2,3 1,1
this <div> has no class
''' div,div_end = makeHTMLTags("div") div_grid = div().setParseAction(withClass("grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 z%s:classclass)re)Z classname namespaceZ classattrrwrwrxrk\s (rcCst}||||B}x`t|D]R\}}|d dd\}} } } | dkrTd|nd|} | dkr|dksxt|dkrtd|\} }tj| }| tjkrd| dkrt||t|t |}n| dkr|dk rt|||t|t ||}nt||t|t |}nD| dkrZt|| |||t|| |||}ntd n| tj krH| dkrt |t st |}t|j |t||}n| dkr|dk rt|||t|t ||}nt||t|t |}nD| dkr>t|| |||t|| |||}ntd ntd | r`|j| ||j| |BK}|}q"W||K}|S) a Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. Parse actions can also be attached to operator expressions. The generated parser will also recognize the use of parentheses to override operator precedences (see example below). Note: if you define a deep operator list, you may see performance issues when using infixNotation. See L{ParserElement.enablePackrat} for a mechanism to potentially improve your parser performance. 
Parameters: - baseExpr - expression representing the most basic element for the nested - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal; if numTerms is 3, opExpr is a tuple of two expressions, for the two operators separating the 3 terms - numTerms is the number of terms for this operator (must be 1, 2, or 3) - rightLeftAssoc is the indicator whether the operator is right or left associative, using the pyparsing-defined constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. - parseAction is the parse action to be associated with expressions matching this operator expression (the parse action tuple member may be omitted) - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) Example:: # simple example of four-function arithmetic with ints and variable names integer = pyparsing_common.signed_integer varname = pyparsing_common.identifier arith_expr = infixNotation(integer | varname, [ ('-', 1, opAssoc.RIGHT), (oneOf('* /'), 2, opAssoc.LEFT), (oneOf('+ -'), 2, opAssoc.LEFT), ]) arith_expr.runTests(''' 5+3*6 (5+3)*6 -2--11 ''', fullDump=False) prints:: 5+3*6 [[5, '+', [3, '*', 6]]] (5+3)*6 [[[5, '+', 3], '*', 6]] -2--11 [[['-', 2], '-', ['-', 11]]] Nrroz%s termz %s%s termrqz@if numterms=3, opExpr must be a tuple or list of two expressionsrrz6operator must be unary (1), binary (2), or ternary (3)z2operator must indicate right or left associativity)N)rrrrrirTLEFTrrrRIGHTrzrr.r)ZbaseExprZopListZlparZrparrZlastExprrZoperDefZopExprZarityZrightLeftAssocr}ZtermNameZopExpr1ZopExpr2ZthisExprrsrwrwrxrisR;    &       &   z4"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*"z string enclosed in double 
quotesz4'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*'z string enclosed in single quotesz*quotedString using single or double quotesuzunicode string literalcCs||krtd|dkr(t|to,t|tr t|dkrt|dkr|dk rtt|t||tjddj dd}n$t j t||tjj dd}nx|dk rtt|t |t |ttjddj dd}n4ttt |t |ttjddj d d}ntd t }|dk rb|tt|t||B|Bt|K}n$|tt|t||Bt|K}|jd ||f|S) a~ Helper method for defining nested lists enclosed in opening and closing delimiters ("(" and ")" are the default). Parameters: - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression - content - expression for items within the nested lists (default=C{None}) - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) If an expression is not provided for the content argument, the nested expression will capture all whitespace-delimited content between delimiters as a list of separate values. Use the C{ignoreExpr} argument to define expressions that may contain opening or closing characters that should not be treated as opening or closing characters for nesting, such as quotedString or a comment expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. The default is L{quotedString}, but if no expressions are to be ignored, then pass C{None} for this argument. 
Example:: data_type = oneOf("void int short long char float double") decl_data_type = Combine(data_type + Optional(Word('*'))) ident = Word(alphas+'_', alphanums+'_') number = pyparsing_common.number arg = Group(decl_data_type + ident) LPAR,RPAR = map(Suppress, "()") code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) c_function = (decl_data_type("type") + ident("name") + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + code_body("body")) c_function.ignore(cStyleComment) source_code = ''' int is_odd(int x) { return (x%2); } int dec_to_hex(char hchar) { if (hchar >= '0' && hchar <= '9') { return (ord(hchar)-ord('0')); } else { return (10+ord(hchar)-ord('A')); } } ''' for func in c_function.searchString(source_code): print("%(name)s (%(type)s) args: %(args)s" % func) prints:: is_odd (int) args: [['int', 'x']] dec_to_hex (int) args: [['char', 'hchar']] z.opening and closing strings cannot be the sameNrr)r cSs |djS)Nr)r)rvrwrwrxry9sznestedExpr..cSs |djS)Nr)r)rvrwrwrxry<scSs |djS)Nr)r)rvrwrwrxryBscSs |djS)Nr)r)rvrwrwrxryFszOopening and closing arguments must be strings if no content expression is givenznested %s%s expression)rrzrrr rr r$rNrrCrrrrr+r2ri)openerZcloserZcontentrOrrwrwrxrPs4:     *$c sfdd}fdd}fdd}ttjdj}ttj|jd}tj|jd }tj|jd } |rtt||t|t|t|| } n$tt|t|t|t|} |j t t| jd S) a Helper method for defining space-delimited indentation blocks, such as those used to define block statements in Python source code. Parameters: - blockStatementExpr - expression defining syntax of statement that is repeated within the indented block - indentStack - list created by caller to manage indentation stack (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - indent - boolean indicating whether block must be indented beyond the the current level; set to False for block of left-most statements (default=C{True}) A valid block must contain at least one C{blockStatement}. 
Example:: data = ''' def A(z): A1 B = 100 G = A2 A2 A3 B def BB(a,b,c): BB1 def BBA(): bba1 bba2 bba3 C D def spam(x,y): def eggs(z): pass ''' indentStack = [1] stmt = Forward() identifier = Word(alphas, alphanums) funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") func_body = indentedBlock(stmt, indentStack) funcDef = Group( funcDecl + func_body ) rvalue = Forward() funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") rvalue << (funcCall | identifier | Word(nums)) assignment = Group(identifier + "=" + rvalue) stmt << ( funcDef | assignment | identifier ) module_body = OneOrMore(stmt) parseTree = module_body.parseString(data) parseTree.pprint() prints:: [['def', 'A', ['(', 'z', ')'], ':', [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], 'B', ['def', 'BB', ['(', 'a', 'b', 'c', ')'], ':', [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], 'C', 'D', ['def', 'spam', ['(', 'x', 'y', ')'], ':', [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] csN|t|krdSt||}|dkrJ|dkr>t||dt||ddS)Nrrzillegal nestingznot a peer entryrsrs)rr9r!r)rr5rvcurCol) indentStackrwrxcheckPeerIndents     z&indentedBlock..checkPeerIndentcs2t||}|dkr"j|n t||ddS)Nrrznot a subentryrs)r9rr)rr5rvr)rrwrxcheckSubIndents   z%indentedBlock..checkSubIndentcsN|t|krdSt||}o4|dko4|dksBt||djdS)Nrrrqznot an unindentrsr:)rr9rr)rr5rvr)rrwrx checkUnindents    z$indentedBlock..checkUnindentz INDENTrZUNINDENTzindented block) rrrrr rrirrrrk) ZblockStatementExprrrrrrr!rZPEERZUNDENTZsmExprrw)rrxrfQsN   ,z#[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]z[\0xa1-\0xbf\0xd7\0xf7]z_:zany tagzgt lt amp nbsp quot aposz><& "'z &(?Prnz);zcommon HTML entitycCs tj|jS)zRHelper parser action to replace common HTML entities with their special characters)_htmlEntityMaprZentity)rvrwrwrxr[sz/\*(?:[^*]|\*(?!/))*z*/zC style commentzz HTML commentz.*z rest of linez//(?:\\\n|[^\n])*z // commentzC++ style commentz#.*zPython style comment)rz 
commaItem)rc@seZdZdZeeZeeZe e j dj eZ e ej dj eedZedj dj eZej edej ej dZejd d eeeed jeBj d Zejeed j dj eZedj dj eZeeBeBjZedj dj eZe ededj dZedj dZ edj dZ!e!de!dj dZ"ee!de!d>dee!de!d?j dZ#e#j$d d d!e j d"Z%e&e"e%Be#Bj d#j d#Z'ed$j d%Z(e)d@d'd(Z*e)dAd*d+Z+ed,j d-Z,ed.j d/Z-ed0j d1Z.e/je0jBZ1e)d2d3Z2e&e3e4d4e5e e6d4d5ee7d6jj d7Z8e9ee:j;e8Bd8d9j d:Zd=S)Brna Here are some common low-level expressions that may be useful in jump-starting parser development: - numeric forms (L{integers}, L{reals}, L{scientific notation}) - common L{programming identifiers} - network addresses (L{MAC}, L{IPv4}, L{IPv6}) - ISO8601 L{dates} and L{datetime} - L{UUID} - L{comma-separated list} Parse actions: - C{L{convertToInteger}} - C{L{convertToFloat}} - C{L{convertToDate}} - C{L{convertToDatetime}} - C{L{stripHTMLTags}} - C{L{upcaseTokens}} - C{L{downcaseTokens}} Example:: pyparsing_common.number.runTests(''' # any int or real number, returned as the appropriate type 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.fnumber.runTests(''' # any int or real number, returned as float 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.hex_integer.runTests(''' # hex numbers 100 FF ''') pyparsing_common.fraction.runTests(''' # fractions 1/2 -3/4 ''') pyparsing_common.mixed_integer.runTests(''' # mixed fractions 1 1/2 -3/4 1-3/4 ''') import uuid pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) pyparsing_common.uuid.runTests(''' # uuid 12345678-1234-5678-1234-567812345678 ''') prints:: # any int or real number, returned as the appropriate type 100 [100] -100 [-100] +100 [100] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # any int or real number, returned as float 100 [100.0] -100 [-100.0] +100 [100.0] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # hex numbers 100 [256] FF [255] # fractions 1/2 [0.5] -3/4 [-0.75] # mixed fractions 1 [1] 1/2 [0.5] -3/4 [-0.75] 1-3/4 [1.75] # uuid 12345678-1234-5678-1234-567812345678 [UUID('12345678-1234-5678-1234-567812345678')] 
integerz hex integerrtz[+-]?\d+zsigned integerrfractioncCs|d|dS)Nrrrrsrw)rvrwrwrxryszpyparsing_common.r8z"fraction or mixed integer-fractionz [+-]?\d+\.\d*z real numberz+[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)z$real number with scientific notationz[+-]?\d+\.?\d*([eE][+-]?\d+)?fnumberrB identifierzK(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}z IPv4 addressz[0-9a-fA-F]{1,4} hex_integerrzfull IPv6 addressrrBz::zshort IPv6 addresscCstdd|DdkS)Ncss|]}tjj|rdVqdS)rrN)rn _ipv6_partr)rrfrwrwrxrsz,pyparsing_common...rw)rH)rvrwrwrxrysz::ffff:zmixed IPv6 addressz IPv6 addressz:[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}z MAC address%Y-%m-%dcsfdd}|S)a Helper to create a parse action for converting parsed date string to Python datetime.date Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) Example:: date_expr = pyparsing_common.iso8601_date.copy() date_expr.setParseAction(pyparsing_common.convertToDate()) print(date_expr.parseString("1999-12-31")) prints:: [datetime.date(1999, 12, 31)] csLytj|djStk rF}zt||t|WYdd}~XnXdS)Nr)rstrptimeZdaterrr{)rr5rvve)fmtrwrxcvt_fnsz.pyparsing_common.convertToDate..cvt_fnrw)rrrw)rrx convertToDates zpyparsing_common.convertToDate%Y-%m-%dT%H:%M:%S.%fcsfdd}|S)a Helper to create a parse action for converting parsed datetime string to Python datetime.datetime Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) Example:: dt_expr = pyparsing_common.iso8601_datetime.copy() dt_expr.setParseAction(pyparsing_common.convertToDatetime()) print(dt_expr.parseString("1999-12-31T23:59:59.999")) prints:: [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] csHytj|dStk rB}zt||t|WYdd}~XnXdS)Nr)rrrrr{)rr5rvr)rrwrxrsz2pyparsing_common.convertToDatetime..cvt_fnrw)rrrw)rrxconvertToDatetimes z"pyparsing_common.convertToDatetimez7(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?z ISO8601 datez(?P\d{4})-(?P\d\d)-(?P\d\d)[T 
](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?zISO8601 datetimez2[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}UUIDcCstjj|dS)a Parse action to remove HTML tags from web page HTML source Example:: # strip HTML links from normal text text = 'More info at the
pyparsing wiki page' td,td_end = makeHTMLTags("TD") table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' r)rn_html_stripperr)rr5rrwrwrx stripHTMLTagss zpyparsing_common.stripHTMLTagsra)rz rr)rzcomma separated listcCs t|jS)N)rr)rvrwrwrxryscCs t|jS)N)rr)rvrwrwrxrysN)rrB)rrB)r)r)?rrrrrmruZconvertToIntegerfloatZconvertToFloatr/rRrirrrDrr'Zsigned_integerrrxrrZ mixed_integerrHrealZsci_realrnumberrr4r3rZ ipv4_addressrZ_full_ipv6_addressZ_short_ipv6_addressr~Z_mixed_ipv6_addressr Z ipv6_addressZ mac_addressrrrZ iso8601_dateZiso8601_datetimeuuidr7r6rrrrrrVr. _commasepitemr@rYrZcomma_separated_listrdrBrwrwrwrxrnsN"" 2   8__main__Zselectfromz_$r])rbcolumnsrjZtablesZcommandaK # '*' as column list and dotted table name select * from SYS.XYZZY # caseless match on "SELECT", and casts back to "select" SELECT * from XYZZY, ABC # list of column names, and mixed case SELECT keyword Select AA,BB,CC from Sys.dual # multiple tables Select A, B, C from Sys.dual, Table2 # invalid SELECT keyword - should fail Xelect A, B, C from Sys.dual # incomplete command - should fail Select # invalid column name - should fail Select ^^^ frox Sys.dual z] 100 -100 +100 3.14159 6.02e23 1e-12 z 100 FF z6 12345678-1234-5678-1234-567812345678 )rq)raF)N)FT)T)r)T)r __version__Z__versionTime__ __author__rweakrefrrrr~rrdrrr"r<rr_threadr ImportErrorZ threadingrrZ ordereddict__all__r version_infor;rmaxsizerr{rchrrurrHrrreversedrrrr6r r rIZmaxintZxrangerZ __builtin__rZfnamerrJrrrrrrZascii_uppercaseZascii_lowercaser4rRrDr3rkrZ printablerVrKrrr!r#r&rr"MutableMappingregisterr9rJrGr/r2r4rQrMr$r,r rrrrQrrrrlr/r'r%r r.r0rrrr*r)r1r0r rrrr rrrrJrr2rMrNrr(rrVr-r rr r+rrbr@r<rrOrNrrSrArgrhrjrirCrIrHrar`rZ _escapedPuncZ_escapedHexCharZ_escapedOctCharUNICODEZ _singleCharZ _charRangermr}r_rMr\rZrmrdrBrrKrLrerrkrTrrrirUr>r^rYrcrPrfr5rWr7r6rrrrr;r[r8rErr]r?r=rFrXrrr:rnrZ selectTokenZ fromTokenZidentZ 
columnNameZcolumnNameListZ columnSpecZ tableNameZ tableNameListZ simpleSQLrrrrrrrwrwrwrx=s                 8      @d &A= I G3pLOD|M &#@sQ,A,    I# %     &0 ,   ? #k Zr   (  0     "PKtge[ __init__.pynu[PKye[C&fWfW appdirs.pynu[#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2005-2010 ActiveState Software Inc. # Copyright (c) 2013 Eddy Petrișor """Utilities for determining application-specific dirs. See for details and usage. """ # Dev Notes: # - MSDN on where to store app data files: # http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 # - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html # - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html __version_info__ = (1, 4, 0) __version__ = '.'.join(map(str, __version_info__)) import sys import os PY3 = sys.version_info[0] == 3 if PY3: unicode = str if sys.platform.startswith('java'): import platform os_name = platform.java_ver()[3][0] if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. system = 'win32' elif os_name.startswith('Mac'): # "Mac OS X", etc. system = 'darwin' else: # "Linux", "SunOS", "FreeBSD", etc. # Setting this to "linux2" is not ideal, but only Windows or Mac # are actually checked for and the rest of the module expects # *sys.platform* style strings. system = 'linux2' else: system = sys.platform def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): r"""Return full path to the user-specific data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. 
You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. Typical user data directories are: Mac OS X: ~/Library/Application Support/ Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined Win XP (not roaming): C:\Documents and Settings\\Application Data\\ Win XP (roaming): C:\Documents and Settings\\Local Settings\Application Data\\ Win 7 (not roaming): C:\Users\\AppData\Local\\ Win 7 (roaming): C:\Users\\AppData\Roaming\\ For Unix, we follow the XDG spec and support $XDG_DATA_HOME. That means, by default "~/.local/share/". """ if system == "win32": if appauthor is None: appauthor = appname const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" path = os.path.normpath(_get_win_folder(const)) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) elif system == 'darwin': path = os.path.expanduser('~/Library/Application Support/') if appname: path = os.path.join(path, appname) else: path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): """Return full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. 
"version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of data dirs should be returned. By default, the first item from XDG_DATA_DIRS is returned, or '/usr/local/share/', if XDG_DATA_DIRS is not set Typical user data directories are: Mac OS X: /Library/Application Support/ Unix: /usr/local/share/ or /usr/share/ Win XP: C:\Documents and Settings\All Users\Application Data\\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. For Unix, this is using the $XDG_DATA_DIRS[0] default. WARNING: Do not use this on Windows. See the Vista-Fail note above for why. """ if system == "win32": if appauthor is None: appauthor = appname path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) elif system == 'darwin': path = os.path.expanduser('/Library/Application Support') if appname: path = os.path.join(path, appname) else: # XDG default for $XDG_DATA_DIRS # only first, if multipath is False path = os.getenv('XDG_DATA_DIRS', os.pathsep.join(['/usr/local/share', '/usr/share'])) pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] if appname: if version: appname = os.path.join(appname, version) pathlist = [os.sep.join([x, appname]) for x in pathlist] if multipath: path = os.pathsep.join(pathlist) else: path = pathlist[0] return path if appname and version: path = os.path.join(path, version) return path def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): r"""Return full path to the user-specific config dir for this application. 
"appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. Typical user data directories are: Mac OS X: same as user_data_dir Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. That means, by deafult "~/.config/". """ if system in ["win32", "darwin"]: path = user_data_dir(appname, appauthor, None, roaming) else: path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): """Return full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". 
Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of config dirs should be returned. By default, the first item from XDG_CONFIG_DIRS is returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set Typical user data directories are: Mac OS X: same as site_data_dir Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in $XDG_CONFIG_DIRS Win *: same as site_data_dir Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False WARNING: Do not use this on Windows. See the Vista-Fail note above for why. """ if system in ["win32", "darwin"]: path = site_data_dir(appname, appauthor) if appname and version: path = os.path.join(path, version) else: # XDG default for $XDG_CONFIG_DIRS # only first, if multipath is False path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] if appname: if version: appname = os.path.join(appname, version) pathlist = [os.sep.join([x, appname]) for x in pathlist] if multipath: path = os.pathsep.join(pathlist) else: path = pathlist[0] return path def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): r"""Return full path to the user-specific cache dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. 
"opinion" (boolean) can be False to disable the appending of "Cache" to the base app data dir for Windows. See discussion below. Typical user cache directories are: Mac OS X: ~/Library/Caches/ Unix: ~/.cache/ (XDG default) Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache Vista: C:\Users\\AppData\Local\\\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir` above). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. This can be disabled with the `opinion=False` option. """ if system == "win32": if appauthor is None: appauthor = appname path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) if opinion: path = os.path.join(path, "Cache") elif system == 'darwin': path = os.path.expanduser('~/Library/Caches') if appname: path = os.path.join(path, appname) else: path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): r"""Return full path to the user-specific log dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. 
You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Logs" to the base app data dir for Windows, and "log" to the base cache dir for Unix. See discussion below. Typical user cache directories are: Mac OS X: ~/Library/Logs/ Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs Vista: C:\Users\\AppData\Local\\\Logs On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in examples of what some windows apps use for a logs dir.) OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` value for Windows and appends "log" to the user cache dir for Unix. This can be disabled with the `opinion=False` option. """ if system == "darwin": path = os.path.join( os.path.expanduser('~/Library/Logs'), appname) elif system == "win32": path = user_data_dir(appname, appauthor, version) version = False if opinion: path = os.path.join(path, "Logs") else: path = user_cache_dir(appname, appauthor, version) version = False if opinion: path = os.path.join(path, "log") if appname and version: path = os.path.join(path, version) return path class AppDirs(object): """Convenience wrapper for getting application dirs.""" def __init__(self, appname, appauthor=None, version=None, roaming=False, multipath=False): self.appname = appname self.appauthor = appauthor self.version = version self.roaming = roaming self.multipath = multipath @property def user_data_dir(self): return user_data_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming) @property def site_data_dir(self): return site_data_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath) @property def user_config_dir(self): return 
user_config_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming) @property def site_config_dir(self): return site_config_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath) @property def user_cache_dir(self): return user_cache_dir(self.appname, self.appauthor, version=self.version) @property def user_log_dir(self): return user_log_dir(self.appname, self.appauthor, version=self.version) #---- internal support stuff def _get_win_folder_from_registry(csidl_name): """This is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names. """ import _winreg shell_folder_name = { "CSIDL_APPDATA": "AppData", "CSIDL_COMMON_APPDATA": "Common AppData", "CSIDL_LOCAL_APPDATA": "Local AppData", }[csidl_name] key = _winreg.OpenKey( _winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" ) dir, type = _winreg.QueryValueEx(key, shell_folder_name) return dir def _get_win_folder_with_pywin32(csidl_name): from win32com.shell import shellcon, shell dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) # Try to make this a unicode path because SHGetFolderPath does # not return unicode strings when there is unicode data in the # path. try: dir = unicode(dir) # Downgrade to short path name if have highbit chars. See # . has_high_char = False for c in dir: if ord(c) > 255: has_high_char = True break if has_high_char: try: import win32api dir = win32api.GetShortPathName(dir) except ImportError: pass except UnicodeError: pass return dir def _get_win_folder_with_ctypes(csidl_name): import ctypes csidl_const = { "CSIDL_APPDATA": 26, "CSIDL_COMMON_APPDATA": 35, "CSIDL_LOCAL_APPDATA": 28, }[csidl_name] buf = ctypes.create_unicode_buffer(1024) ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) # Downgrade to short path name if have highbit chars. See # . 
has_high_char = False for c in buf: if ord(c) > 255: has_high_char = True break if has_high_char: buf2 = ctypes.create_unicode_buffer(1024) if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): buf = buf2 return buf.value def _get_win_folder_with_jna(csidl_name): import array from com.sun import jna from com.sun.jna.platform import win32 buf_size = win32.WinDef.MAX_PATH * 2 buf = array.zeros('c', buf_size) shell = win32.Shell32.INSTANCE shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) dir = jna.Native.toString(buf.tostring()).rstrip("\0") # Downgrade to short path name if have highbit chars. See # . has_high_char = False for c in dir: if ord(c) > 255: has_high_char = True break if has_high_char: buf = array.zeros('c', buf_size) kernel = win32.Kernel32.INSTANCE if kernal.GetShortPathName(dir, buf, buf_size): dir = jna.Native.toString(buf.tostring()).rstrip("\0") return dir if system == "win32": try: import win32com.shell _get_win_folder = _get_win_folder_with_pywin32 except ImportError: try: from ctypes import windll _get_win_folder = _get_win_folder_with_ctypes except ImportError: try: import com.sun.jna _get_win_folder = _get_win_folder_with_jna except ImportError: _get_win_folder = _get_win_folder_from_registry #---- self test code if __name__ == "__main__": appname = "MyApp" appauthor = "MyCompany" props = ("user_data_dir", "site_data_dir", "user_config_dir", "site_config_dir", "user_cache_dir", "user_log_dir") print("-- app dirs (with optional 'version')") dirs = AppDirs(appname, appauthor, version="1.0") for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) print("\n-- app dirs (without optional 'version')") dirs = AppDirs(appname, appauthor) for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) print("\n-- app dirs (without optional 'appauthor')") dirs = AppDirs(appname) for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) print("\n-- app dirs (with 
disabled 'appauthor')") dirs = AppDirs(appname, appauthor=False) for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) PKye[EkHkH"__pycache__/appdirs.cpython-36.pycnu[3 vhfW@sdZd1ZdjeeeZddlZddlZejddkZ e r>eZ ej j drddl Z e j ddZej drrd Zqej d rd Zqd Znej Zd2ddZd3ddZd4ddZd5ddZd6ddZd7ddZGdddeZddZdd Zd!d"Zd#d$Zed kryddlZeZWnnek rydd%l m!Z!eZWnBek r|yddl"Z#eZWnek rveZYnXYnXYnXe$d&kr~d'Z%d(Z&d8Z'e(d)ee%e&d*d+Z)x$e'D]Z*e(d,e*e+e)e*fqWe(d-ee%e&Z)x$e'D]Z*e(d,e*e+e)e*fqWe(d.ee%Z)x$e'D]Z*e(d,e*e+e)e*fq$We(d/ee%d d0Z)x$e'D]Z*e(d,e*e+e)e*fq^WdS)9zyUtilities for determining application-specific dirs. See for details and usage. .NjavaZWindowswin32ZMacdarwinZlinux2FcCstdkr^|dkr|}|rdpd}tjjt|}|r|dk rNtjj|||}qtjj||}nNtdkrtjjd}|rtjj||}n&tjdtjjd }|rtjj||}|r|rtjj||}|S) aJReturn full path to the user-specific data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. 
Typical user data directories are: Mac OS X: ~/Library/Application Support/ Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined Win XP (not roaming): C:\Documents and Settings\\Application Data\\ Win XP (roaming): C:\Documents and Settings\\Local Settings\Application Data\\ Win 7 (not roaming): C:\Users\\AppData\Local\\ Win 7 (roaming): C:\Users\\AppData\Roaming\\ For Unix, we follow the XDG spec and support $XDG_DATA_HOME. That means, by default "~/.local/share/". rN CSIDL_APPDATACSIDL_LOCAL_APPDATAFrz~/Library/Application Support/Z XDG_DATA_HOMEz~/.local/share)systemospathnormpath_get_win_folderjoin expandusergetenv)appname appauthorversionroamingconstr r/usr/lib/python3.6/appdirs.py user_data_dir-s&   rcs tdkrR|dkr}tjjtd}r|dk rBtjj||}qtjj|}ntdkrztjjd}rtjj|}nttjdtjjdd g}d d |j tjD}r|rtjj|fd d |D}|rtjj|}n|d }|So|rtjj||}|S)aiReturn full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of data dirs should be returned. By default, the first item from XDG_DATA_DIRS is returned, or '/usr/local/share/', if XDG_DATA_DIRS is not set Typical user data directories are: Mac OS X: /Library/Application Support/ Unix: /usr/local/share/ or /usr/share/ Win XP: C:\Documents and Settings\All Users\Application Data\\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) 
Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. For Unix, this is using the $XDG_DATA_DIRS[0] default. WARNING: Do not use this on Windows. See the Vista-Fail note above for why. rNCSIDL_COMMON_APPDATAFrz/Library/Application SupportZ XDG_DATA_DIRSz/usr/local/sharez /usr/sharecSs g|]}tjj|jtjqSr)r r rrstripsep).0xrrr sz!site_data_dir..csg|]}tjj|gqSr)r rr)rr)rrrr sr) r r r rrrrrpathsepsplit)rrr multipathr pathlistr)rr site_data_dirds4  r%cCsXtdkrt||d|}n&tjdtjjd}|r>tjj||}|rT|rTtjj||}|S)aReturn full path to the user-specific config dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. Typical user data directories are: Mac OS X: same as user_data_dir Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. That means, by deafult "~/.config/". rrNZXDG_CONFIG_HOMEz ~/.config)rr)r rr rr rr)rrrrr rrruser_config_dirsr&cstd kr*t|}r|rtjj||}ndtjdd}dd|jtjD}rt|rbtjj|fdd|D}|rtjj|}n|d}|S) aReturn full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. 
"appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of config dirs should be returned. By default, the first item from XDG_CONFIG_DIRS is returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set Typical user data directories are: Mac OS X: same as site_data_dir Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in $XDG_CONFIG_DIRS Win *: same as site_data_dir Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False WARNING: Do not use this on Windows. See the Vista-Fail note above for why. rrZXDG_CONFIG_DIRSz/etc/xdgcSs g|]}tjj|jtjqSr)r r rrr)rrrrrr sz#site_config_dir..csg|]}tjj|gqSr)r rr)rr)rrrr sr)rr)r r%r r rrr"r!)rrrr#r r$r)rrsite_config_dirs  r'TcCstdkrd|dkr|}tjjtd}|r|dk rBtjj|||}ntjj||}|rtjj|d}nNtdkrtjjd}|rtjj||}n&tjdtjjd }|rtjj||}|r|rtjj||}|S) aReturn full path to the user-specific cache dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. 
"opinion" (boolean) can be False to disable the appending of "Cache" to the base app data dir for Windows. See discussion below. Typical user cache directories are: Mac OS X: ~/Library/Caches/ Unix: ~/.cache/ (XDG default) Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache Vista: C:\Users\\AppData\Local\\\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir` above). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. This can be disabled with the `opinion=False` option. rNr FZCacherz~/Library/CachesZXDG_CACHE_HOMEz~/.cache)r r r rrrrr)rrropinionr rrruser_cache_dirs(! r)cCstdkr tjjtjjd|}nNtdkrLt|||}d}|rntjj|d}n"t|||}d}|rntjj|d}|r|rtjj||}|S)aReturn full path to the user-specific log dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Logs" to the base app data dir for Windows, and "log" to the base cache dir for Unix. See discussion below. 
Typical user cache directories are: Mac OS X: ~/Library/Logs/ Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs Vista: C:\Users\\AppData\Local\\\Logs On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in examples of what some windows apps use for a logs dir.) OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` value for Windows and appends "log" to the user cache dir for Unix. This can be disabled with the `opinion=False` option. rz~/Library/LogsrFZLogslog)r r r rrrr))rrrr(r rrr user_log_dir:s    r+c@sbeZdZdZdddZeddZedd Zed d Zed d Z eddZ eddZ dS)AppDirsz1Convenience wrapper for getting application dirs.NFcCs"||_||_||_||_||_dS)N)rrrrr#)selfrrrrr#rrr__init__os zAppDirs.__init__cCst|j|j|j|jdS)N)rr)rrrrr)r-rrrrws zAppDirs.user_data_dircCst|j|j|j|jdS)N)rr#)r%rrrr#)r-rrrr%|s zAppDirs.site_data_dircCst|j|j|j|jdS)N)rr)r&rrrr)r-rrrr&s zAppDirs.user_config_dircCst|j|j|j|jdS)N)rr#)r'rrrr#)r-rrrr's zAppDirs.site_config_dircCst|j|j|jdS)N)r)r)rrr)r-rrrr)s zAppDirs.user_cache_dircCst|j|j|jdS)N)r)r+rrr)r-rrrr+s zAppDirs.user_log_dir)NNFF) __name__ __module__ __qualname____doc__r.propertyrr%r&r'r)r+rrrrr,ms      r,cCs:ddl}dddd|}|j|jd}|j||\}}|S)zThis is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names. 
rNZAppDatazCommon AppDataz Local AppData)r rr z@Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders)_winregOpenKeyHKEY_CURRENT_USERZ QueryValueEx) csidl_namer4Zshell_folder_namekeydirtyperrr_get_win_folder_from_registrysr;cCsddlm}m}|jdt||dd}y`t|}d}x|D]}t|dkr:d}Pq:W|ryddl}|j|}Wnt k rYnXWnt k rYnX|S)Nr)shellconshellFT) win32com.shellr<r=SHGetFolderPathgetattrunicodeordwin32apiGetShortPathName ImportError UnicodeError)r7r<r=r9 has_high_charcrDrrr_get_win_folder_with_pywin32s$   rJcCsddl}dddd|}|jd}|jjjd|dd|d}x|D]}t|dkrBd }PqBW|r|jd}|jjj|j|dr|}|jS) Nr#)r rr iFr>T) ctypesZcreate_unicode_bufferwindllZshell32ZSHGetFolderPathWrCZkernel32ZGetShortPathNameWvalue)r7rNZ csidl_constbufrHrIZbuf2rrr_get_win_folder_with_ctypess"    rRc Csddl}ddlm}ddlm}|jjd}|jd|}|jj }|j dt |j |d|j j ||jj|jjd}d}x|D]} t| dkr~d }Pq~W|r|jd|}|jj } tj|||r|jj|jjd}|S) Nr)jna)rrIFr>T)arrayZcom.sunrSZcom.sun.jna.platformrZWinDefZMAX_PATHZzerosZShell32ZINSTANCEr@rAZShlObjZSHGFP_TYPE_CURRENTZNativeZtoStringZtostringrrCZKernel32ZkernalrE) r7rVrSrZbuf_sizerQr=r9rHrIZkernelrrr_get_win_folder_with_jnas&       rW)rO__main__ZMyAppZ MyCompanyz%-- app dirs (with optional 'version')z1.0)rz%s: %sz) -- app dirs (without optional 'version')z+ -- app dirs (without optional 'appauthor')z( -- app dirs (with disabled 'appauthor'))r)rrr)NNNF)NNNF)NNNF)NNNF)NNNT)NNNT)rr%r&r'r)r+),r2Z__version_info__rmapstr __version__sysr version_infoZPY3rBplatform startswithZjava_verZos_namer rr%r&r'r)r+objectr,r;rJrRrWr?Zwin32comrrFrNrOZ com.sun.jnaZcomr/rrZpropsprintdirsZproprArrrr s~    7 B ( 3 9 3+         PKye[EkHkH(__pycache__/appdirs.cpython-36.opt-1.pycnu[3 vhfW@sdZd1ZdjeeeZddlZddlZejddkZ e r>eZ ej j drddl Z e j ddZej drrd Zqej d rd Zqd Znej Zd2ddZd3ddZd4ddZd5ddZd6ddZd7ddZGdddeZddZdd Zd!d"Zd#d$Zed kryddlZeZWnnek rydd%l m!Z!eZWnBek r|yddl"Z#eZWnek rveZYnXYnXYnXe$d&kr~d'Z%d(Z&d8Z'e(d)ee%e&d*d+Z)x$e'D]Z*e(d,e*e+e)e*fqWe(d-ee%e&Z)x$e'D]Z*e(d,e*e+e)e*fqWe(d.ee%Z)x$e'D]Z*e(d,e*e+e)e*fq$We(d/ee%d 
d0Z)x$e'D]Z*e(d,e*e+e)e*fq^WdS)9zyUtilities for determining application-specific dirs. See for details and usage. .NjavaZWindowswin32ZMacdarwinZlinux2FcCstdkr^|dkr|}|rdpd}tjjt|}|r|dk rNtjj|||}qtjj||}nNtdkrtjjd}|rtjj||}n&tjdtjjd }|rtjj||}|r|rtjj||}|S) aJReturn full path to the user-specific data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. Typical user data directories are: Mac OS X: ~/Library/Application Support/ Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined Win XP (not roaming): C:\Documents and Settings\\Application Data\\ Win XP (roaming): C:\Documents and Settings\\Local Settings\Application Data\\ Win 7 (not roaming): C:\Users\\AppData\Local\\ Win 7 (roaming): C:\Users\\AppData\Roaming\\ For Unix, we follow the XDG spec and support $XDG_DATA_HOME. That means, by default "~/.local/share/". 
rN CSIDL_APPDATACSIDL_LOCAL_APPDATAFrz~/Library/Application Support/Z XDG_DATA_HOMEz~/.local/share)systemospathnormpath_get_win_folderjoin expandusergetenv)appname appauthorversionroamingconstr r/usr/lib/python3.6/appdirs.py user_data_dir-s&   rcs tdkrR|dkr}tjjtd}r|dk rBtjj||}qtjj|}ntdkrztjjd}rtjj|}nttjdtjjdd g}d d |j tjD}r|rtjj|fd d |D}|rtjj|}n|d }|So|rtjj||}|S)aiReturn full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of data dirs should be returned. By default, the first item from XDG_DATA_DIRS is returned, or '/usr/local/share/', if XDG_DATA_DIRS is not set Typical user data directories are: Mac OS X: /Library/Application Support/ Unix: /usr/local/share/ or /usr/share/ Win XP: C:\Documents and Settings\All Users\Application Data\\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. For Unix, this is using the $XDG_DATA_DIRS[0] default. WARNING: Do not use this on Windows. See the Vista-Fail note above for why. 
rNCSIDL_COMMON_APPDATAFrz/Library/Application SupportZ XDG_DATA_DIRSz/usr/local/sharez /usr/sharecSs g|]}tjj|jtjqSr)r r rrstripsep).0xrrr sz!site_data_dir..csg|]}tjj|gqSr)r rr)rr)rrrr sr) r r r rrrrrpathsepsplit)rrr multipathr pathlistr)rr site_data_dirds4  r%cCsXtdkrt||d|}n&tjdtjjd}|r>tjj||}|rT|rTtjj||}|S)aReturn full path to the user-specific config dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. Typical user data directories are: Mac OS X: same as user_data_dir Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. That means, by deafult "~/.config/". rrNZXDG_CONFIG_HOMEz ~/.config)rr)r rr rr rr)rrrrr rrruser_config_dirsr&cstd kr*t|}r|rtjj||}ndtjdd}dd|jtjD}rt|rbtjj|fdd|D}|rtjj|}n|d}|S) aReturn full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. 
You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of config dirs should be returned. By default, the first item from XDG_CONFIG_DIRS is returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set Typical user data directories are: Mac OS X: same as site_data_dir Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in $XDG_CONFIG_DIRS Win *: same as site_data_dir Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False WARNING: Do not use this on Windows. See the Vista-Fail note above for why. rrZXDG_CONFIG_DIRSz/etc/xdgcSs g|]}tjj|jtjqSr)r r rrr)rrrrrr sz#site_config_dir..csg|]}tjj|gqSr)r rr)rr)rrrr sr)rr)r r%r r rrr"r!)rrrr#r r$r)rrsite_config_dirs  r'TcCstdkrd|dkr|}tjjtd}|r|dk rBtjj|||}ntjj||}|rtjj|d}nNtdkrtjjd}|rtjj||}n&tjdtjjd }|rtjj||}|r|rtjj||}|S) aReturn full path to the user-specific cache dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Cache" to the base app data dir for Windows. See discussion below. 
Typical user cache directories are: Mac OS X: ~/Library/Caches/ Unix: ~/.cache/ (XDG default) Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache Vista: C:\Users\\AppData\Local\\\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir` above). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. This can be disabled with the `opinion=False` option. rNr FZCacherz~/Library/CachesZXDG_CACHE_HOMEz~/.cache)r r r rrrrr)rrropinionr rrruser_cache_dirs(! r)cCstdkr tjjtjjd|}nNtdkrLt|||}d}|rntjj|d}n"t|||}d}|rntjj|d}|r|rtjj||}|S)aReturn full path to the user-specific log dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Logs" to the base app data dir for Windows, and "log" to the base cache dir for Unix. See discussion below. Typical user cache directories are: Mac OS X: ~/Library/Logs/ Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs Vista: C:\Users\\AppData\Local\\\Logs On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. 
(Note: I'm interested in examples of what some windows apps use for a logs dir.) OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` value for Windows and appends "log" to the user cache dir for Unix. This can be disabled with the `opinion=False` option. rz~/Library/LogsrFZLogslog)r r r rrrr))rrrr(r rrr user_log_dir:s    r+c@sbeZdZdZdddZeddZedd Zed d Zed d Z eddZ eddZ dS)AppDirsz1Convenience wrapper for getting application dirs.NFcCs"||_||_||_||_||_dS)N)rrrrr#)selfrrrrr#rrr__init__os zAppDirs.__init__cCst|j|j|j|jdS)N)rr)rrrrr)r-rrrrws zAppDirs.user_data_dircCst|j|j|j|jdS)N)rr#)r%rrrr#)r-rrrr%|s zAppDirs.site_data_dircCst|j|j|j|jdS)N)rr)r&rrrr)r-rrrr&s zAppDirs.user_config_dircCst|j|j|j|jdS)N)rr#)r'rrrr#)r-rrrr's zAppDirs.site_config_dircCst|j|j|jdS)N)r)r)rrr)r-rrrr)s zAppDirs.user_cache_dircCst|j|j|jdS)N)r)r+rrr)r-rrrr+s zAppDirs.user_log_dir)NNFF) __name__ __module__ __qualname____doc__r.propertyrr%r&r'r)r+rrrrr,ms      r,cCs:ddl}dddd|}|j|jd}|j||\}}|S)zThis is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names. 
rNZAppDatazCommon AppDataz Local AppData)r rr z@Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders)_winregOpenKeyHKEY_CURRENT_USERZ QueryValueEx) csidl_namer4Zshell_folder_namekeydirtyperrr_get_win_folder_from_registrysr;cCsddlm}m}|jdt||dd}y`t|}d}x|D]}t|dkr:d}Pq:W|ryddl}|j|}Wnt k rYnXWnt k rYnX|S)Nr)shellconshellFT) win32com.shellr<r=SHGetFolderPathgetattrunicodeordwin32apiGetShortPathName ImportError UnicodeError)r7r<r=r9 has_high_charcrDrrr_get_win_folder_with_pywin32s$   rJcCsddl}dddd|}|jd}|jjjd|dd|d}x|D]}t|dkrBd }PqBW|r|jd}|jjj|j|dr|}|jS) Nr#)r rr iFr>T) ctypesZcreate_unicode_bufferwindllZshell32ZSHGetFolderPathWrCZkernel32ZGetShortPathNameWvalue)r7rNZ csidl_constbufrHrIZbuf2rrr_get_win_folder_with_ctypess"    rRc Csddl}ddlm}ddlm}|jjd}|jd|}|jj }|j dt |j |d|j j ||jj|jjd}d}x|D]} t| dkr~d }Pq~W|r|jd|}|jj } tj|||r|jj|jjd}|S) Nr)jna)rrIFr>T)arrayZcom.sunrSZcom.sun.jna.platformrZWinDefZMAX_PATHZzerosZShell32ZINSTANCEr@rAZShlObjZSHGFP_TYPE_CURRENTZNativeZtoStringZtostringrrCZKernel32ZkernalrE) r7rVrSrZbuf_sizerQr=r9rHrIZkernelrrr_get_win_folder_with_jnas&       rW)rO__main__ZMyAppZ MyCompanyz%-- app dirs (with optional 'version')z1.0)rz%s: %sz) -- app dirs (without optional 'version')z+ -- app dirs (without optional 'appauthor')z( -- app dirs (with disabled 'appauthor'))r)rrr)NNNF)NNNF)NNNF)NNNF)NNNT)NNNT)rr%r&r'r)r+),r2Z__version_info__rmapstr __version__sysr version_infoZPY3rBplatform startswithZjava_verZos_namer rr%r&r'r)r+objectr,r;rJrRrWr?Zwin32comrrFrNrOZ com.sun.jnaZcomr/rrZpropsprintdirsZproprArrrr s~    7 B ( 3 9 3+         PKe[,؂؂ pyparsing.pycnu[ abci@sdZdZdZdZddlZddlmZddlZddl Z ddl Z ddl Z ddl Z ddl Z ddlZddlZddlZddlmZyddlmZWn!ek rddlmZnXydd l mZWn?ek r=ydd lmZWnek r9eZnXnXd d d d ddddddddddddddddddd d!d"d#d$d%d&d'd(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDdEdFdGdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcdddedfdgdhdidjdkdldmdndodpdqdrgiZee jds ZedtdskZere jZ e!Z"e#Z$e!Z%e&e'e(e)e*ee+e,e-e.e/g Z0nre j1Z 
e2Z3duZ%gZ0ddl4Z4xEdvj5D]7Z6ye0j7e8e4e6Wne9k rZq$nXq$We:dwe3dxDZ;dyZ<dze=fd{YZ>ej?ej@ZAd|ZBeBd}ZCeAeBZDe#d~ZEdjFdejGDZHd!eIfdYZJd#eJfdYZKd%eJfdYZLd'eLfdYZMd*eIfdYZNde=fdYZOd&e=fdYZPe jQjRePdZSdZTdZUdZVdZWdZXdZYddZZd(e=fdYZ[d0e[fdYZ\de\fdYZ]de\fdYZ^de\fdYZ_e_Z`e_e[_ade\fdYZbd e_fdYZcd ebfdYZddpe\fdYZed3e\fdYZfd+e\fdYZgd)e\fdYZhd e\fdYZid2e\fdYZjde\fdYZkdekfdYZldekfdYZmdekfdYZnd.ekfdYZod-ekfdYZpd5ekfdYZqd4ekfdYZrd$e[fdYZsd esfdYZtd esfdYZudesfdYZvdesfdYZwd"e[fdYZxdexfdYZydexfdYZzdexfdYZ{de{fdYZ|d6e{fdYZ}de=fdYZ~e~ZdexfdYZd,exfdYZdexfdYZdefdYZd1exfdYZdefdYZdefdYZdefdYZd/efdYZde=fdYZdZdedZedZdZdZdZdZeedZdZedZdZdZe]jdGZemjdMZenjdLZeojdeZepjddZefeEdddjdZegdjdZegdjdZeeBeBefeHddddxBegde jBZeeedeZe_dedjdee|eeBjddZdZdZdZdZdZedZedZdZdZdZdZe=e_ddZe>Ze=e_e=e_ededdZeZeegddjdZeegddjdZeegddegddBjdZee`dejjdZddeejdZedZedZedZeefeAeDdjd\ZZeedj5dZegddjFejdjdZdZeegddjdZegdjdZegd jjd Zegd jd ZeegddeBjd ZeZegdjdZee|efeHddeefde_denjjdZeeejeBddjd>ZdrfdYZedkrecdZecdZefeAeDdZeeddejeZeeejdZdeBZeeddejeZeeejdZededeedZejdejjdejjdejjd ddlZejjeejejjd!ndS("sS pyparsing module - Classes and methods to define and execute parsing grammars The pyparsing module is an alternative approach to creating and executing simple grammars, vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you don't need to learn a new syntax for defining grammars or matching expressions - the parsing module provides a library of classes that you use to construct the grammar directly in Python. Here is a program to parse "Hello, World!" (or any greeting of the form C{", !"}), built up using L{Word}, L{Literal}, and L{And} elements (L{'+'} operator gives L{And} expressions, strings are auto-converted to L{Literal} expressions):: from pyparsing import Word, alphas # define grammar of a greeting greet = Word(alphas) + "," + Word(alphas) + "!" hello = "Hello, World!" print (hello, "->", greet.parseString(hello)) The program outputs the following:: Hello, World! 
-> ['Hello', ',', 'World', '!'] The Python representation of the grammar is quite readable, owing to the self-explanatory class names, and the use of '+', '|' and '^' operators. The L{ParseResults} object returned from L{ParserElement.parseString} can be accessed as a nested list, a dictionary, or an object with named attributes. The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) - quoted strings - embedded comments s2.1.10s07 Oct 2016 01:31 UTCs*Paul McGuire iN(tref(tdatetime(tRLock(t OrderedDicttAndtCaselessKeywordtCaselessLiteralt CharsNotIntCombinetDicttEachtEmptyt FollowedBytForwardt GoToColumntGrouptKeywordtLineEndt LineStarttLiteralt MatchFirsttNoMatchtNotAnyt OneOrMoretOnlyOncetOptionaltOrtParseBaseExceptiontParseElementEnhancetParseExceptiontParseExpressiontParseFatalExceptiont ParseResultstParseSyntaxExceptiont ParserElementt QuotedStringtRecursiveGrammarExceptiontRegextSkipTot StringEndt StringStarttSuppresstTokentTokenConvertertWhitetWordtWordEndt WordStartt ZeroOrMoret alphanumstalphast alphas8bitt anyCloseTagt anyOpenTagt cStyleCommenttcoltcommaSeparatedListtcommonHTMLEntityt countedArraytcppStyleCommenttdblQuotedStringtdblSlashCommentt delimitedListtdictOftdowncaseTokenstemptythexnumst htmlCommenttjavaStyleCommenttlinetlineEndt lineStarttlinenot makeHTMLTagst makeXMLTagstmatchOnlyAtColtmatchPreviousExprtmatchPreviousLiteralt nestedExprtnullDebugActiontnumstoneOftopAssoctoperatorPrecedencet printablestpunc8bittpythonStyleCommentt quotedStringt removeQuotestreplaceHTMLEntityt replaceWitht restOfLinetsglQuotedStringtsranget stringEndt stringStartttraceParseActiont unicodeStringt upcaseTokenst withAttributet indentedBlocktoriginalTextFortungroupt infixNotationt locatedExprt withClasst CloseMatchttokenMaptpyparsing_commoniicCs}t|tr|Syt|SWnUtk rxt|jtjd}td}|jd|j |SXdS(sDrop-in replacement for str(obj) 
that tries to be Unicode friendly. It first tries str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It then < returns the unicode object | encodes it with the default encoding | ... >. txmlcharrefreplaces&#\d+;cSs#dtt|ddd!dS(Ns\uiii(thextint(tt((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyttN( t isinstancetunicodetstrtUnicodeEncodeErrortencodetsystgetdefaultencodingR%tsetParseActionttransformString(tobjtrett xmlcharref((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_ustrs  s6sum len sorted reversed list tuple set any all min maxccs|] }|VqdS(N((t.0ty((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys sicCsRd}ddjD}x/t||D]\}}|j||}q,W|S(s/Escape &, <, >, ", ', etc. in a string of data.s&><"'css|]}d|dVqdS(t&t;N((Rts((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys ssamp gt lt quot apos(tsplittziptreplace(tdatat from_symbolst to_symbolstfrom_tto_((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt _xml_escapes t _ConstantscBseZRS((t__name__t __module__(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRst 0123456789t ABCDEFabcdefi\Rrccs$|]}|tjkr|VqdS(N(tstringt whitespace(Rtc((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys scBs_eZdZdd d dZedZdZdZdZ ddZ d Z RS( s7base exception class for all parsing runtime exceptionsicCs[||_|dkr*||_d|_n||_||_||_|||f|_dS(NRr(tloctNonetmsgtpstrt parserElementtargs(tselfRRRtelem((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__init__s       cCs||j|j|j|jS(s internal factory method to simplify creating one type of ParseException from another - avoids having __init__ signature conflicts among subclasses (RRRR(tclstpe((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_from_exceptionscCsm|dkrt|j|jS|dkr>t|j|jS|dkr]t|j|jSt|dS(ssupported attributes by name are: - lineno - returns the line number of the exception text - col - returns the column number of the exception text - line - returns the line containing 
the exception text RHR7tcolumnREN(R7R(RHRRR7REtAttributeError(Rtaname((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt __getattr__s   cCs d|j|j|j|jfS(Ns"%s (at char %d), (line:%d, col:%d)(RRRHR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__str__scCs t|S(N(R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__repr__ss>!} ('-' operator) indicates that parsing is to stop immediately because an unbacktrackable syntax error has been found(RRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR!scBs eZdZdZdZRS(sZexception thrown by L{ParserElement.validate} if the grammar could be improperly recursivecCs ||_dS(N(tparseElementTrace(RtparseElementList((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCs d|jS(NsRecursiveGrammarException: %s(R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s(RRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR$s t_ParseResultsWithOffsetcBs,eZdZdZdZdZRS(cCs||f|_dS(N(ttup(Rtp1tp2((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR$scCs |j|S(N(R(Rti((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt __getitem__&scCst|jdS(Ni(treprR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR(scCs|jd|f|_dS(Ni(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt setOffset*s(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR#s   cBseZdZd-d-eedZd-d-eeedZdZedZ dZ dZ dZ dZ e Zd Zd Zd Zd Zd ZereZeZeZn-eZeZeZdZdZdZdZdZd-dZdZdZdZ dZ!dZ"dZ#dZ$dZ%dZ&dZ'ddZ(d Z)d!Z*d"Z+d-e,ded#Z-d$Z.d%Z/dd&ed'Z0d(Z1d)Z2d*Z3d+Z4d,Z5RS(.sI Structured parse results, to provide multiple means of access to the parsed data: - as a list (C{len(results)}) - by list index (C{results[0], results[1]}, etc.) 
- by attribute (C{results.} - see L{ParserElement.setResultsName}) Example:: integer = Word(nums) date_str = (integer.setResultsName("year") + '/' + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") # parseString returns a ParseResults object result = date_str.parseString("1999/12/31") def test(s, fn=repr): print("%s -> %s" % (s, fn(eval(s)))) test("list(result)") test("result[0]") test("result['month']") test("result.day") test("'month' in result") test("'minutes' in result") test("result.dump()", str) prints:: list(result) -> ['1999', '/', '12', '/', '31'] result[0] -> '1999' result['month'] -> '12' result.day -> '31' 'month' in result -> True 'minutes' in result -> False result.dump() -> ['1999', '/', '12', '/', '31'] - day: 31 - month: 12 - year: 1999 cCs/t||r|Stj|}t|_|S(N(Rstobjectt__new__tTruet_ParseResults__doinit(RttoklisttnametasListtmodaltretobj((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRTs  cCs|jrt|_d|_d|_i|_||_||_|dkrTg}n||trp||_ n-||t rt||_ n |g|_ t |_ n|dk r|r|sd|j|s(R(R((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt _itervaluesscsfdjDS(Nc3s|]}||fVqdS(N((RR(R(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys s(R(R((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt _iteritemsscCst|jS(sVReturns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).(RR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytkeysscCst|jS(sXReturns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).(Rt itervalues(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytvaluesscCst|jS(sfReturns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).(Rt iteritems(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCs t|jS(sSince keys() returns an iterator, this method is helpful in 
bypassing code that looks for the existence of any defined results names.(tboolR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pythaskeysscOs|sdg}nxI|jD];\}}|dkrJ|d|f}qtd|qWt|dtst|dks|d|kr|d}||}||=|S|d}|SdS(s Removes and returns item at specified index (default=C{last}). Supports both C{list} and C{dict} semantics for C{pop()}. If passed no argument or an integer argument, it will use C{list} semantics and pop tokens from the list of parsed tokens. If passed a non-integer argument (most likely a string), it will use C{dict} semantics and pop the corresponding value from any defined results names. A second default return value argument is supported, just as in C{dict.pop()}. Example:: def remove_first(tokens): tokens.pop(0) print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] label = Word(alphas) patt = label("LABEL") + OneOrMore(Word(nums)) print(patt.parseString("AAB 123 321").dump()) # Use pop() in a parse action to remove named result (note that corresponding value is not # removed from list form of results) def remove_LABEL(tokens): tokens.pop("LABEL") return tokens patt.addParseAction(remove_LABEL) print(patt.parseString("AAB 123 321").dump()) prints:: ['AAB', '123', '321'] - LABEL: AAB ['AAB', '123', '321'] itdefaultis-pop() got an unexpected keyword argument '%s'iN(RRRsRoR(RRtkwargsRRtindexR}t defaultvalue((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytpops"     cCs||kr||S|SdS(si Returns named result matching the given key, or if there is no such name, then returns the given C{defaultValue} or C{None} if no C{defaultValue} is specified. Similar to C{dict.get()}. 
Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString("1999/12/31") print(result.get("year")) # -> '1999' print(result.get("hour", "not specified")) # -> 'not specified' print(result.get("hour")) # -> None N((Rtkeyt defaultValue((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs cCsw|jj||x]|jjD]L\}}x=t|D]/\}\}}t||||k|| ['0', '123', '321'] # use a parse action to insert the parse location in the front of the parsed results def insert_locn(locn, tokens): tokens.insert(0, locn) print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] N(RtinsertRRRR(RRtinsStrRRRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR2scCs|jj|dS(s Add single element to end of ParseResults list of elements. Example:: print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to compute the sum of the parsed integers, and add it to the end def append_sum(tokens): tokens.append(sum(map(int, tokens))) print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] N(Rtappend(Rtitem((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRFs cCs0t|tr||7}n|jj|dS(s Add sequence of elements to end of ParseResults list of elements. Example:: patt = OneOrMore(Word(alphas)) # use a parse action to append the reverse of the matched strings, to make a palindrome def make_palindrome(tokens): tokens.extend(reversed([t[::-1] for t in tokens])) return ''.join(tokens) print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' N(RsR Rtextend(Rtitemseq((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRTs  cCs|j2|jjdS(s7 Clear all elements and results names. 
N(RRtclear(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRfscCsy ||SWntk r dSX||jkr}||jkrR|j|ddStg|j|D]}|d^qcSndSdS(NRrii(RRRR (RRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRms  +cCs|j}||7}|S(N(R(RtotherR}((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__add__{s  c s|jrt|jfd}|jj}g|D]<\}}|D])}|t|d||df^qMq=}xJ|D]?\}}|||st](RR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRsRrcCsog}xb|jD]W}|r2|r2|j|nt|trT||j7}q|jt|qW|S(N(RRRsR t _asStringListR(RtseptoutR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs cCs5g|jD]'}t|tr+|jn|^q S(s Returns the parse results as a nested list of matching tokens, all converted to strings. Example:: patt = OneOrMore(Word(alphas)) result = patt.parseString("sldkj lsdkj sldkj") # even though the result prints in string-like form, it is actually a pyparsing ParseResults print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] # Use asList() to create an actual list result_list = result.asList() print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] (RRsR R(Rtres((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscsGtr|j}n |j}fdtfd|DS(s Returns the named parse results as a nested dictionary. Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString('12/31/1999') print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) result_dict = result.asDict() print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} # even though a ParseResults supports dict-like access, sometime you just need to have a dict import json print(json.dumps(result)) # -> Exception: TypeError: ... 
is not JSON serializable print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} csMt|trE|jr%|jSg|D]}|^q,Sn|SdS(N(RsR RtasDict(R|R(ttoItem(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs    c3s'|]\}}||fVqdS(N((RRR(R(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys s(tPY_3RRR(Rtitem_fn((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs    cCsPt|j}|jj|_|j|_|jj|j|j|_|S(sA Returns a new copy of a C{ParseResults} object. (R RRRRRR R(RR}((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs   c Csd}g}td|jjD}|d}|sPd}d}d}nd } |d k rk|} n|jr|j} n| s|rdSd} n|||d| dg7}x t|jD]\} } t| trI| |kr|| j || |o|d k||g7}q|| j d |o6|d k||g7}qd } | |krh|| } n| s|rzqqd} nt t | } |||d| d| d| dg 7}qW|||d| dg7}dj |S( s (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. s css2|](\}}|D]}|d|fVqqdS(iN((RRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys s s RrtITEMtsgss %s%s- %s: s icss|]}t|tVqdS(N(RsR (Rtvv((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys sss %s%s[%d]: %s%s%sRr( RRRRtsortedRRsR tdumpRtanyRR( RR$tdepthtfullRtNLRRRRR1((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR3Ps,  B?cOstj|j||dS(s Pretty-printer for parsed results as a list, using the C{pprint} module. Accepts additional positional or keyword args as defined for the C{pprint.pprint} method. 
(U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) Example:: ident = Word(alphas, alphanums) num = Word(nums) func = Forward() term = ident | num | Group('(' + func + ')') func <<= ident + Group(Optional(delimitedList(term))) result = func.parseString("fna a,b,(fnb c,d,200),100") result.pprint(width=40) prints:: ['fna', ['a', 'b', ['(', 'fnb', ['c', 'd', '200'], ')'], '100']] N(tpprintR(RRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR8}scCsC|j|jj|jdk r-|jp0d|j|jffS(N(RRRRRRR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt __getstate__s  cCsm|d|_|d\|_}}|_i|_|jj||dk r`t||_n d|_dS(Nii(RRRRR RRR(RtstateR/t inAccumNames((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt __setstate__s   cCs|j|j|j|jfS(N(RRRR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__getnewargs__scCs tt|t|jS(N(RRRR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRsN(6RRRRRRRsRRRRRRRt __nonzero__RRRRRRRRRRRRRRRRRRRRR RRRRRRRRRR!R-R0R3R8R9R<R=R(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR -sh& '              4             # =  %-   cCsW|}d|ko#t|knr@||ddkr@dS||jdd|S(sReturns current column within a string, counting newlines as line separators. The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}} for more information on parsing strings containing C{}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. iis (Rtrfind(RtstrgR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR7s cCs|jdd|dS(sReturns current line number within a string, counting newlines as line separators. The first line is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. 
See L{I{ParserElement.parseString}} for more information on parsing strings containing C{}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. s ii(tcount(RR@((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRHs cCsR|jdd|}|jd|}|dkrB||d|!S||dSdS(sfReturns the line of text containing loc within a string, counting newlines as line separators. s iiN(R?tfind(RR@tlastCRtnextCR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyREs  cCsAdt|dt|dt||t||fGHdS(NsMatch s at loc s(%d,%d)(RRHR7(tinstringRtexpr((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_defaultStartDebugActionscCs'dt|dt|jGHdS(NsMatched s -> (RRuR(REtstartloctendlocRFttoks((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_defaultSuccessDebugActionscCsdt|GHdS(NsException raised:(R(RERRFtexc((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_defaultExceptionDebugActionscGsdS(sG'Do-nothing' debug action, to suppress debugging output during parsing.N((R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyROsics tkrfdSdgtgtd dkrVdd}ddntj}tjd}|d dd }|d|d |ffd }d }y"tdtdj}Wntk rt }nX||_|S(Ncs |S(N((RtlRp(tfunc(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRriiiicSsJtdkrdnd}tjd| |d|}|j|jfgS( Niiiiitlimiti(iii(tsystem_versiont tracebackt extract_stacktfilenameRH(RPR t frame_summary((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRSscSs2tj|d|}|d}|j|jfgS(NRPi(RRt extract_tbRTRH(ttbRPtframesRU((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRVs iRPiicsxy&|d}td<|SWqtk rdrInAz:tjd}|dddd ksnWd~Xdkrdcd7Rt __class__(ii( tsingleArgBuiltinsRRQRRRSRVtgetattrRt ExceptionRu(ROR[RSt LINE_DIFFt this_lineR]t func_name((RVRZRORPR[R\s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt _trim_aritys*          cBseZdZdZeZedZedZedZ dZ dZ edZ e dZd Zd Zd Zd Zd ZdZe dZdZe e dZdZdZdefdYZedFk rdefdYZndefdYZiZe Z!ddgZ"e e 
dZ#eZ$edZ%eZ&eddZ'edZ(e)edZ*d Z+e)d!Z,e)ed"Z-d#Z.d$Z/d%Z0d&Z1d'Z2d(Z3d)Z4d*Z5d+Z6d,Z7d-Z8d.Z9d/Z:dFd0Z;d1Z<d2Z=d3Z>d4Z?d5Z@d6ZAe d7ZBd8ZCd9ZDd:ZEd;ZFgd<ZGed=ZHd>ZId?ZJd@ZKdAZLdBZMe dCZNe dDe e edEZORS(Gs)Abstract base level parser element class.s cCs |t_dS(s Overrides the default whitespace chars Example:: # default whitespace chars are space, and newline OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] # change to just treat newline as significant ParserElement.setDefaultWhitespaceChars(" \t") OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] N(R"tDEFAULT_WHITE_CHARS(tchars((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetDefaultWhitespaceChars=s cCs |t_dS(s Set class to be used for inclusion of string literals into a parser. Example:: # default literal class used is Literal integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] # change to Suppress ParserElement.inlineLiteralsUsing(Suppress) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] N(R"t_literalStringClass(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytinlineLiteralsUsingLscCst|_d|_d|_d|_||_t|_t j |_ t|_ t |_t |_t|_t |_t |_t|_d|_t|_d|_d|_t|_t |_dS(NRr(NNN(Rt parseActionRt failActiontstrReprt resultsNamet saveAsListRtskipWhitespaceR"Rft whiteCharstcopyDefaultWhiteCharsRtmayReturnEmptytkeepTabst ignoreExprstdebugt streamlinedt mayIndexErrorterrmsgt modalResultst debugActionstret callPreparset callDuringTry(Rtsavelist((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRas(                   cCsEtj|}|j|_|j|_|jrAtj|_n|S(s$ Make a copy of this C{ParserElement}. Useful for defining different parse actions for the same parsing pattern, using copies of the original parse element. 
Example:: integer = Word(nums).setParseAction(lambda toks: int(toks[0])) integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) prints:: [5120, 100, 655360, 268435456] Equivalent form of C{expr.copy()} is just C{expr()}:: integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") (RRkRuRrR"RfRq(Rtcpy((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRxs    cCs>||_d|j|_t|dr:|j|j_n|S(sf Define name for this expression, makes debugging and exception messages clearer. Example:: Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) s Expected t exception(RRyRRR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetNames  cCsE|j}|jdr.|d }t}n||_| |_|S(sP Define name for referencing matching tokens as a nested attribute of the returned parse results. NOTE: this returns a *copy* of the original C{ParserElement} object; this is so that the client can define a basic element, such as an integer, and reference it in multiple places with different names. You can also set results names using the abbreviated syntax, C{expr("name")} in place of C{expr.setResultsName("name")} - see L{I{__call__}<__call__>}. Example:: date_str = (integer.setResultsName("year") + '/' + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: date_str = integer("year") + '/' + integer("month") + '/' + integer("day") t*i(RtendswithRRnRz(RRtlistAllMatchestnewself((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetResultsNames     csa|r9|jttfd}|_||_n$t|jdr]|jj|_n|S(sMethod to invoke the Python pdb debugger when this element is about to be parsed. 
Set C{breakFlag} to True to enable, False to disable. cs)ddl}|j||||S(Ni(tpdbt set_trace(RERt doActionst callPreParseR(t _parseMethod(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytbreakers  t_originalParseMethod(t_parseRRR(Rt breakFlagR((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetBreaks   cOs7tttt||_|jdt|_|S(s  Define action to perform when successfully matching parse element definition. Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: - s = the original string being parsed (see note below) - loc = the location of the matching substring - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object If the functions in fns modify the tokens, they can return them as the return value from fn, and the modified list of tokens will replace the original. Otherwise, fn does not need to return any value. Optional keyword arguments: - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{parseString}} for more information on parsing strings containing C{}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. 
Example:: integer = Word(nums) date_str = integer + '/' + integer + '/' + integer date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] # use parse action to convert to ints at parse time integer = Word(nums).setParseAction(lambda toks: int(toks[0])) date_str = integer + '/' + integer + '/' + integer # note that integer fields are now ints, not strings date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] R~(RtmapReRkRRR~(RtfnsR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRzs"cOsF|jtttt|7_|jp<|jdt|_|S(s Add parse action to expression's list of parse actions. See L{I{setParseAction}}. See examples in L{I{copy}}. R~(RkRRReR~RR(RRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytaddParseActions$cs|jdd|jdtr*tntx3|D]+fd}|jj|q7W|jp~|jdt|_|S(sAdd a boolean predicate function to expression's list of parse actions. See L{I{setParseAction}} for function call signatures. Unlike C{setParseAction}, functions passed to C{addCondition} need to return boolean success/fail of the condition. Optional keyword arguments: - message = define a custom message to be used in the raised exception - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException Example:: integer = Word(nums).setParseAction(lambda toks: int(toks[0])) year_int = integer.copy() year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") date_str = year_int + '/' + integer + '/' + integer result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) tmessagesfailed user-defined conditiontfatalcs7tt|||s3||ndS(N(RRe(RRNRp(texc_typetfnR(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytpasR~(RRRRRkRR~(RRRR((RRRs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt addConditions cCs ||_|S(s Define action to perform if parsing fails at this expression. 
Fail acton fn is a callable function that takes the arguments C{fn(s,loc,expr,err)} where: - s = string being parsed - loc = location where expression match was attempted and failed - expr = the parse expression that failed - err = the exception thrown The function returns no value. It may throw C{L{ParseFatalException}} if it is desired to stop parsing immediately.(Rl(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt setFailActions cCsnt}xa|rit}xN|jD]C}y)x"|j||\}}t}q+WWqtk raqXqWq W|S(N(RRRuRR(RRERt exprsFoundtetdummy((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_skipIgnorables#s   cCsp|jr|j||}n|jrl|j}t|}x-||krh|||krh|d7}q?Wn|S(Ni(RuRRpRqR(RRERtwttinstrlen((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytpreParse0s    cCs |gfS(N((RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt parseImpl<scCs|S(N((RRERt tokenlist((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt postParse?sc Cs|j}|s|jr,|jdr?|jd|||n|rc|jrc|j||}n|}|}yUy|j|||\}}Wn/tk rt|t||j |nXWqt k r(} |jdr|jd|||| n|jr"|j|||| nqXn|rP|jrP|j||}n|}|}|j sw|t|kry|j|||\}}Wqtk rt|t||j |qXn|j|||\}}|j |||}t ||jd|jd|j} |jrf|s7|jrf|ryrxk|jD]`} | ||| }|dk rJt ||jd|jot|t tfd|j} qJqJWWqct k r} |jdr|jd|||| nqcXqfxn|jD]`} | ||| }|dk rt ||jd|joMt|t tfd|j} qqWn|r|jdr|jd||||| qn|| fS(NiiRRi(RvRlR{R}RRRRRRyRRxRR RnRoRzRkR~RRsR( RRERRRt debuggingtpreloct tokensStartttokensterrt retTokensR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt _parseNoCacheCsp   &    %$       #cCsNy|j||dtdSWn)tk rIt|||j|nXdS(NRi(RRRRRy(RRER((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyttryParses cCs7y|j||Wnttfk r.tSXtSdS(N(RRRRR(RRER((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt canParseNexts t_UnboundedCachecBseZdZRS(csit|_fd}fd}fd}tj|||_tj|||_tj|||_dS(Ncsj|S(N(R(RR(tcachet not_in_cache(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscs||}) - define your parse action using the full C{(s,loc,toks)} signature, and reference the 
input string using the parse action's C{s} argument - explictly expand the tabs in your input string before calling C{parseString} Example:: Word('a').parseString('aaaaabaaa') # -> ['aaaaa'] Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text iN( R"RRwt streamlineRuRtt expandtabsRRR R'Rtverbose_stacktrace(RREtparseAllRRRtseRL((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt parseString#s$      ccs|js|jnx|jD]}|jq W|jsRt|j}nt|}d}|j}|j}t j d} yx||kra| |kray.|||} ||| dt \} } Wnt k r| d}qX| |krT| d7} | | | fV|rK|||} | |kr>| }qQ|d7}q^| }q| d}qWWn(t k r}t jrq|nXdS(s Scan the input string for expression matches. Each match will return the matching tokens, start location, and end location. May be called with optional C{maxMatches} argument, to clip scanning after 'n' matches are found. If C{overlap} is specified, then overlapping matches will be reported. Note that the start and end locations are reported relative to the string being parsed. See L{I{parseString}} for more information on parsing strings with embedded tabs. Example:: source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" print(source) for tokens,start,end in Word(alphas).scanString(source): print(' '*start + '^'*(end-start)) print(' '*start + tokens[0]) prints:: sldjf123lsdjjkf345sldkjf879lkjsfd987 ^^^^^ sldjf ^^^^^^^ lsdjjkf ^^^^^^ sldkjf ^^^^^^ lkjsfd iRiN(RwRRuRtRRRRRR"RRRRR(RREt maxMatchestoverlapRRRt preparseFntparseFntmatchesRtnextLocRtnextlocRL((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt scanStringUsB               c Cs%g}d}t|_yx|j|D]}\}}}|j|||!|rt|trs||j7}qt|tr||7}q|j|n|}q(W|j||g|D]}|r|^q}djt t t |SWn(t k r }t jrq!|nXdS(sf Extension to C{L{scanString}}, to modify matching text with modified tokens that may be returned from a parse action. To use C{transformString}, define a grammar and attach a parse action to it that modifies the returned token list. 
Invoking C{transformString()} on a target string will then scan for matches, and replace the matched text patterns according to the logic in the parse action. C{transformString()} returns the resulting transformed string. Example:: wd = Word(alphas) wd.setParseAction(lambda toks: toks[0].title()) print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) Prints:: Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. iRrN(RRtRRRsR RRRRRt_flattenRR"R( RRERtlastERpRRtoRL((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR{s(     cCsey6tg|j||D]\}}}|^qSWn(tk r`}tjrWqa|nXdS(s~ Another extension to C{L{scanString}}, simplifying the access to the tokens found to match the given parse expression. May be called with optional C{maxMatches} argument, to clip searching after 'n' matches are found. Example:: # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters cap_word = Word(alphas.upper(), alphas.lower()) print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) prints:: ['More', 'Iron', 'Lead', 'Gold', 'I'] N(R RRR"R(RRERRpRRRL((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt searchStrings 6 c csfd}d}xJ|j|d|D]3\}}}|||!V|rO|dVn|}q"W||VdS(s[ Generator method to split a string using the given expression as a separator. May be called with optional C{maxsplit} argument, to limit the number of splits; and the optional C{includeSeparators} argument (default=C{False}), if the separating matching text should be included in the split results. 
Example:: punc = oneOf(list(".,;:/-!?")) print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) prints:: ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] iRN(R( RREtmaxsplittincludeSeparatorstsplitstlastRpRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs %   cCsdt|tr!tj|}nt|tsTtjdt|tdddSt ||gS(s Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement converts them to L{Literal}s by default. Example:: greet = Word(alphas) + "," + Word(alphas) + "!" hello = "Hello, World!" print (hello, "->", greet.parseString(hello)) Prints:: Hello, World! -> ['Hello', ',', 'World', '!'] s4Cannot combine element of type %s with ParserElementt stackleveliN( RsRR"RitwarningstwarnRt SyntaxWarningRR(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s  cCs\t|tr!tj|}nt|tsTtjdt|tdddS||S(s] Implementation of + operator when left operand is not a C{L{ParserElement}} s4Cannot combine element of type %s with ParserElementRiN( RsRR"RiRRRRR(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs cCsmt|tr!tj|}nt|tsTtjdt|tdddSt |t j |gS(sQ Implementation of - operator, returns C{L{And}} with error stop s4Cannot combine element of type %s with ParserElementRiN( RsRR"RiRRRRRRt _ErrorStop(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__sub__s cCs\t|tr!tj|}nt|tsTtjdt|tdddS||S(s] Implementation of - operator when left operand is not a C{L{ParserElement}} s4Cannot combine element of type %s with ParserElementRiN( RsRR"RiRRRRR(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__rsub__ s csEt|tr|d}}n-t|tr7|d d }|dd kr_d|df}nt|dtr|dd kr|ddkrtS|ddkrtS|dtSqLt|dtrt|dtr|\}}||8}qLtdt|dt|dntdt||dkrgtdn|dkrtdn||kodknrtdn|rfd |r |dkr|}qt g||}qA|}n(|dkr.}nt g|}|S( s Implementation of * operator, allows use of C{expr * 3} in place of C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer tuple, similar to C{{min,max}} multipliers in regular expressions. 
Tuples may also include C{None} as in: - C{expr*(n,None)} or C{expr*(n,)} is equivalent to C{expr*n + L{ZeroOrMore}(expr)} (read as "at least n instances of C{expr}") - C{expr*(None,n)} is equivalent to C{expr*(0,n)} (read as "0 to n instances of C{expr}") - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)} - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)} Note that C{expr*(None,n)} does not raise an exception if more than n exprs exist in the input stream; that is, C{expr*(None,n)} does not enforce a maximum number of expr occurrences. If this behavior is desired, then write C{expr*(None,n) + ~expr} iiis7cannot multiply 'ParserElement' and ('%s','%s') objectss0cannot multiply 'ParserElement' and '%s' objectss/cannot multiply ParserElement by negative values@second tuple value must be greater or equal to first tuple values+cannot multiply ParserElement by 0 or (0,0)cs2|dkr$t|dStSdS(Ni(R(tn(tmakeOptionalListR(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR]s N(NN( RsRottupleRR0RRRt ValueErrorR(RR t minElementst optElementsR}((RRs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__mul__,sD#  &  )      cCs |j|S(N(R(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__rmul__pscCsdt|tr!tj|}nt|tsTtjdt|tdddSt ||gS(sI Implementation of | operator - returns C{L{MatchFirst}} s4Cannot combine element of type %s with ParserElementRiN( RsRR"RiRRRRRR(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__or__ss cCs\t|tr!tj|}nt|tsTtjdt|tdddS||BS(s] Implementation of | operator when left operand is not a C{L{ParserElement}} s4Cannot combine element of type %s with ParserElementRiN( RsRR"RiRRRRR(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__ror__s cCsdt|tr!tj|}nt|tsTtjdt|tdddSt ||gS(sA Implementation of ^ operator - returns C{L{Or}} s4Cannot combine element of type %s with ParserElementRiN( RsRR"RiRRRRRR(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__xor__s 
cCs\t|tr!tj|}nt|tsTtjdt|tdddS||AS(s] Implementation of ^ operator when left operand is not a C{L{ParserElement}} s4Cannot combine element of type %s with ParserElementRiN( RsRR"RiRRRRR(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__rxor__s cCsdt|tr!tj|}nt|tsTtjdt|tdddSt ||gS(sC Implementation of & operator - returns C{L{Each}} s4Cannot combine element of type %s with ParserElementRiN( RsRR"RiRRRRRR (RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__and__s cCs\t|tr!tj|}nt|tsTtjdt|tdddS||@S(s] Implementation of & operator when left operand is not a C{L{ParserElement}} s4Cannot combine element of type %s with ParserElementRiN( RsRR"RiRRRRR(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__rand__s cCs t|S(sE Implementation of ~ operator - returns C{L{NotAny}} (R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt __invert__scCs'|dk r|j|S|jSdS(s  Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}. If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be passed as C{True}. If C{name} is omitted, same as calling C{L{copy}}. Example:: # these are equivalent userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") N(RRR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__call__s  cCs t|S(s Suppresses the output of this C{ParserElement}; useful to keep punctuation from cluttering up returned output. (R)(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsuppressscCs t|_|S(s Disables the skipping of whitespace before matching the characters in the C{ParserElement}'s defined pattern. This is normally only used internally by the pyparsing module, but may be needed in some whitespace-sensitive grammars. 
(RRp(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytleaveWhitespaces cCst|_||_t|_|S(s8 Overrides the default whitespace chars (RRpRqRRr(RRg((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetWhitespaceCharss   cCs t|_|S(s Overrides default behavior to expand C{}s to spaces before parsing the input string. Must be called before C{parseString} when the input grammar contains elements that match C{} characters. (RRt(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt parseWithTabss cCsrt|trt|}nt|trR||jkrn|jj|qnn|jjt|j|S(s Define expression to be ignored (e.g., comments) while doing pattern matching; may be called repeatedly, to define multiple comment or other ignorable patterns. Example:: patt = OneOrMore(Word(alphas)) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] patt.ignore(cStyleComment) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] (RsRR)RuRR(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytignores cCs1|p t|pt|ptf|_t|_|S(sT Enable display of debugging messages while doing pattern matching. (RGRKRMR{RRv(Rt startActiont successActiontexceptionAction((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetDebugActions s    cCs)|r|jtttn t|_|S(s Enable display of debugging messages while doing pattern matching. Set C{flag} to True to enable, False to disable. 
Example:: wd = Word(alphas).setName("alphaword") integer = Word(nums).setName("numword") term = wd | integer # turn on debugging for wd wd.setDebug() OneOrMore(term).parseString("abc 123 xyz 890") prints:: Match alphaword at loc 0(1,1) Matched alphaword -> ['abc'] Match alphaword at loc 3(1,4) Exception raised:Expected alphaword (at char 4), (line:1, col:5) Match alphaword at loc 7(1,8) Matched alphaword -> ['xyz'] Match alphaword at loc 11(1,12) Exception raised:Expected alphaword (at char 12), (line:1, col:13) Match alphaword at loc 15(1,16) Exception raised:Expected alphaword (at char 15), (line:1, col:16) The output shown is that produced by the default debug actions - custom debug actions can be specified using L{setDebugActions}. Prior to attempting to match the C{wd} expression, the debugging message C{"Match at loc (,)"} is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, which makes debugging and exception messages easier to understand - for instance, the default name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. (RRGRKRMRRv(Rtflag((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetDebugs# cCs|jS(N(R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR@scCs t|S(N(R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRCscCst|_d|_|S(N(RRwRRm(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRFs  cCsdS(N((RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytcheckRecursionKscCs|jgdS(sj Check defined expressions for valid structure, check for infinite recursive definitions. N(R(Rt validateTrace((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytvalidateNscCsy|j}Wn5tk rGt|d}|j}WdQXnXy|j||SWn(tk r}tjr}q|nXdS(s Execute the parse expression on the given file or filename. 
If a filename is specified (instead of a file object), the entire file is opened, read, and closed before parsing. trN(treadRtopenRRR"R(Rtfile_or_filenameRt file_contentstfRL((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt parseFileTs  cCsdt|tr1||kp0t|t|kSt|trM|j|Stt||kSdS(N(RsR"tvarsRRtsuper(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__eq__hs " cCs ||k S(N((RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__ne__pscCstt|S(N(thashtid(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__hash__sscCs ||kS(N((RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__req__vscCs ||k S(N((RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__rne__yscCs:y!|jt|d|tSWntk r5tSXdS(s Method for quick testing of a parser against a test string. Good for simple inline microtests of sub expressions while building up larger parser. Parameters: - testString - to test against this expression for a match - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests Example:: expr = Word(nums) assert expr.matches("100") RN(RRRRR(Rt testStringR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR|s  t#cCsyt|tr6tttj|jj}nt|trTt|}ng}g}t } x|D]} |d k r|j | t s|r| r|j | qmn| sqmndj|| g} g}yQ| jdd} |j| d|} | j | jd|| o%| } Wntk r} t| trPdnd}d| kr| j t| j| | j dt| j| dd |n| j d| jd || j d t| | o|} | } n<tk r*}| j d t|| o|} |} nX|rX|rG| j dndj| GHn|j | | fqmW| |fS( s3 Execute the parse expression on a series of test strings, showing each test, the parsed results or where the parse failed. Quick and easy way to run a parse expression against a list of sample strings. 
Parameters: - tests - a list of separate test strings, or a multiline string of test strings - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests - comment - (default=C{'#'}) - expression for indicating embedded comments in the test string; pass None to disable comment filtering - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline; if False, only dump nested list - printResults - (default=C{True}) prints test output to stdout - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing Returns: a (success, results) tuple, where success indicates that all tests succeeded (or failed if C{failureTests} is True), and the results contain a list of lines of each test's output Example:: number_expr = pyparsing_common.number.copy() result = number_expr.runTests(''' # unsigned integer 100 # negative integer -100 # float with scientific notation 6.02e23 # integer with scientific notation 1e-12 ''') print("Success" if result[0] else "Failed!") result = number_expr.runTests(''' # stray character 100Z # missing leading digit before '.' -.100 # too many '.' 3.14.159 ''', failureTests=True) print("Success" if result[0] else "Failed!") prints:: # unsigned integer 100 [100] # negative integer -100 [-100] # float with scientific notation 6.02e23 [6.02e+23] # integer with scientific notation 1e-12 [1e-12] Success # stray character 100Z ^ FAIL: Expected end of text (at char 3), (line:1, col:4) # missing leading digit before '.' -.100 ^ FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) # too many '.' 3.14.159 ^ FAIL: Expected end of text (at char 4), (line:1, col:5) Success Each test string must be on a single line. 
If you want to test a string that spans multiple lines, create a test like this:: expr.runTest(r"this is a test\n of strings that spans \n 3 lines") (Note that this is a raw string literal, you must include the leading 'r'.) s s\nRR6s(FATAL)Rrt it^sFAIL: sFAIL-EXCEPTION: N(RsRRRRuRtrstript splitlinesRRRRRRRRRR3RRRERR7Ra(RttestsRtcommenttfullDumpt printResultst failureTestst allResultstcommentstsuccessRpRtresultRRRL((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytrunTestssNW' +  ,    N(PRRRRfRRt staticmethodRhRjRRRRRRRzRRRRRRRRRRRRRRRRRRRRRRRRRt_MAX_INTRR{RRR RRRRRRRRRRRRRRRRRRRRRRRRRR R R RRRRR"(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR"8s      &   H     " 2G +   D      )            cBseZdZdZRS(sT Abstract C{ParserElement} subclass, for defining atomic matching patterns. cCstt|jdtdS(NR(R R*RR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s(RRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR* scBseZdZdZRS(s, An empty token, will always match. cCs2tt|jd|_t|_t|_dS(NR (R R RRRRsRRx(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s  (RRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR  scBs#eZdZdZedZRS(s( A token that will never match. cCs;tt|jd|_t|_t|_d|_dS(NRsUnmatchable token( R RRRRRsRRxRy(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR* s    cCst|||j|dS(N(RRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR1 s(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR& s cBs#eZdZdZedZRS(s Token to exactly match a specified string. Example:: Literal('blah').parseString('blah') # -> ['blah'] Literal('blah').parseString('blahfooblah') # -> ['blah'] Literal('blah').parseString('bla') # -> Exception: Expected "blah" For case-insensitive matching, use L{CaselessLiteral}. For keyword matching (force word break before and after the matched string), use L{Keyword} or L{CaselessKeyword}. 
cCstt|j||_t||_y|d|_Wn0tk rntj dt ddt |_ nXdt |j|_d|j|_t|_t|_dS(Nis2null string passed to Literal; use Empty() insteadRis"%s"s Expected (R RRtmatchRtmatchLentfirstMatchCharRRRRR R^RRRyRRsRx(Rt matchString((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRC s      cCsg|||jkrK|jdks7|j|j|rK||j|jfSt|||j|dS(Ni(R'R&t startswithR%RRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRV s$(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR5 s  cBsKeZdZedZdedZedZ dZ e dZ RS(s\ Token to exactly match a specified string as a keyword, that is, it must be immediately followed by a non-keyword character. Compare with C{L{Literal}}: - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}. - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} Accepts two optional constructor arguments in addition to the keyword string: - C{identChars} is a string of characters that would be valid identifier characters, defaulting to all alphanumerics + "_" and "$" - C{caseless} allows case-insensitive matching, default is C{False}. Example:: Keyword("start").parseString("start") # -> ['start'] Keyword("start").parseString("starting") # -> Exception For case-insensitive matching, use L{CaselessKeyword}. 
s_$cCstt|j|dkr+tj}n||_t||_y|d|_Wn't k r}t j dt ddnXd|j|_ d|j |_t|_t|_||_|r|j|_|j}nt||_dS(Nis2null string passed to Keyword; use Empty() insteadRis"%s"s Expected (R RRRtDEFAULT_KEYWORD_CHARSR%RR&R'RRRRRRyRRsRxtcaselesstuppert caselessmatchRt identChars(RR(R.R+((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRq s&        cCsb|jr||||j!j|jkrF|t||jkse|||jj|jkrF|dks||dj|jkrF||j|jfSn|||jkrF|jdks|j|j|rF|t||jks|||j|jkrF|dks2||d|jkrF||j|jfSt |||j |dS(Nii( R+R&R,R-RR.R%R'R)RRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s #9)$3#cCs%tt|j}tj|_|S(N(R RRR*R.(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s cCs |t_dS(s,Overrides the default Keyword chars N(RR*(Rg((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetDefaultKeywordChars sN( RRRR1R*RRRRRRR#R/(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR^ s    cBs#eZdZdZedZRS(sl Token to match a specified string, ignoring case of letters. Note: the matched results will always be in the case of the given match string, NOT the case of the input text. Example:: OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] (Contrast with example for L{CaselessKeyword}.) cCsItt|j|j||_d|j|_d|j|_dS(Ns'%s's Expected (R RRR,t returnStringRRy(RR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s cCsS||||j!j|jkr7||j|jfSt|||j|dS(N(R&R,R%R0RRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s#(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s  cBs&eZdZddZedZRS(s Caseless version of L{Keyword}. Example:: OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] (Contrast with example for L{CaselessLiteral}.) 
cCs#tt|j||dtdS(NR+(R RRR(RR(R.((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR scCs||||j!j|jkrp|t||jks\|||jj|jkrp||j|jfSt|||j|dS(N(R&R,R-RR.R%RRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s#9N(RRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s cBs&eZdZddZedZRS(sx A variation on L{Literal} which matches "close" matches, that is, strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters: - C{match_string} - string to be matched - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match The results from a successful parse will contain the matched text from the input string and the following named results: - C{mismatches} - a list of the positions within the match_string where mismatches were found - C{original} - the original match_string used to compare against the input string If C{mismatches} is an empty list, then the match was an exact match. Example:: patt = CloseMatch("ATCATCGAATGGA") patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) # exact match patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) # close match allowing up to 2 mismatches patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) icCs]tt|j||_||_||_d|j|jf|_t|_t|_ dS(Ns&Expected %r (with up to %d mismatches)( R RjRRt match_stringt maxMismatchesRyRRxRs(RR1R2((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s    cCs|}t|}|t|j}||kr|j}d}g} |j} xtt|||!|jD]J\}} | \} } | | kro| j|t| | krPqqoqoW|d}t|||!g}|j|d<| |d<||fSnt|||j|dS(Niitoriginalt mismatches( RR1R2RRRR 
RRy(RRERRtstartRtmaxlocR1tmatch_stringlocR4R2ts_mtsrctmattresults((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s(    ,        (RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRj s cBs>eZdZddddeddZedZdZRS(s Token for matching words composed of allowed character sets. Defined with string containing all allowed initial characters, an optional string containing allowed body characters (if omitted, defaults to the initial character set), and an optional minimum, maximum, and/or exact length. The default value for C{min} is 1 (a minimum value < 1 is not valid); the default values for C{max} and C{exact} are 0, meaning no maximum or exact length restriction. An optional C{excludeChars} parameter can list characters that might be found in the input C{bodyChars} string; useful to define a word of all printables except for one or two characters, for instance. L{srange} is useful for defining custom character set strings for defining C{Word} expressions, using range notation from regular expression character sets. A common mistake is to use C{Word} to match a specific literal string, as in C{Word("Address")}. Remember that C{Word} uses the string argument to define I{sets} of matchable characters. This expression would match "Add", "AAA", "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an exact literal string, use L{Literal} or L{Keyword}. pyparsing includes helper strings for building Words: - L{alphas} - L{nums} - L{alphanums} - L{hexnums} - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.) - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.) 
- L{printables} (any non-whitespace character) Example:: # a word composed of digits integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) # a word with a leading capital, and zero or more lowercase capital_word = Word(alphas.upper(), alphas.lower()) # hostnames are alphanumeric, with leading alpha, and '-' hostname = Word(alphas, alphanums+'-') # roman numeral (not a strict parser, accepts invalid mix of characters) roman = Word("IVXLCDM") # any string of non-whitespace characters, except for ',' csv_value = Word(printables, excludeChars=",") iicstt|jrcdjfd|D}|rcdjfd|D}qcn||_t||_|r||_t||_n||_t||_|dk|_ |dkrt dn||_ |dkr||_ n t |_ |dkr)||_ ||_ nt||_d|j|_t|_||_d|j|jkr}|dkr}|dkr}|dkr}|j|jkrd t|j|_net|jdkrd tj|jt|jf|_n%d t|jt|jf|_|jrDd |jd |_nytj|j|_Wq}tk ryd|_q}XndS( NRrc3s!|]}|kr|VqdS(N((RR(t excludeChars(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys 7 sc3s!|]}|kr|VqdS(N((RR(R<(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys 9 siisZcannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitteds Expected Rs[%s]+s%s[%s]*s [%s][%s]*s\b(R R-RRt initCharsOrigRt initCharst bodyCharsOrigt bodyCharst maxSpecifiedRtminLentmaxLenR$RRRyRRxt asKeywordt_escapeRegexRangeCharstreStringRR|tescapetcompileRaR(RR>R@tmintmaxtexactRDR<((R<s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR4 sT%             :   c Cs|jr[|jj||}|s?t|||j|n|j}||jfS|||jkrt|||j|n|}|d7}t|}|j}||j }t ||}x*||kr|||kr|d7}qWt } |||j krt } n|jrG||krG|||krGt } n|jr|dkrp||d|ks||kr|||krt } qn| rt|||j|n||||!fS(Nii(R|R%RRytendtgroupR>RR@RCRIRRBRRARD( RRERRR!R5Rt bodycharsR6tthrowException((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRj s6       %  < cCsytt|jSWntk r*nX|jdkrd}|j|jkr}d||j||jf|_qd||j|_n|jS(NcSs&t|dkr|d dS|SdS(Nis...(R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt charsAsStr s s W:(%s,%s)sW:(%s)(R R-RRaRmRR=R?(RRP((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s  (N( 
RRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR- s.6 #cBsDeZdZeejdZddZedZ dZ RS(s Token for matching strings that match a given regular expression. Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. If the given regex contains named groups (defined using C{(?P...)}), these will be preserved as named parse results. Example:: realnum = Regex(r"[+-]?\d+\.\d*") date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)') # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") s[A-Z]icCs3tt|jt|tr|sAtjdtddn||_||_ y+t j |j|j |_ |j|_ Wqt jk rtjd|tddqXnIt|tjr||_ t||_|_ ||_ n tdt||_d|j|_t|_t|_dS(sThe parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.s0null string passed to Regex; use Empty() insteadRis$invalid pattern (%s) passed to RegexsCRegex may only be constructed with a string or a compiled RE objects Expected N(R R%RRsRRRRtpatterntflagsR|RHRFt sre_constantsterrortcompiledREtypeRuRRRRyRRxRRs(RRQRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s.          cCs|jj||}|s6t|||j|n|j}|j}t|j}|rx|D]}||||eZdZddeededZedZdZRS(s Token for matching strings that are delimited by quoting characters. 
Defined with the following parameters: - quoteChar - string of one or more characters defining the quote delimiting string - escChar - character to escape quotes, typically backslash (default=C{None}) - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None}) - multiline - boolean indicating whether quotes can span multiple lines (default=C{False}) - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True}) - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar) - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True}) Example:: qs = QuotedString('"') print(qs.searchString('lsjdf "This is the quote" sldjf')) complex_qs = QuotedString('{{', endQuoteChar='}}') print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) sql_qs = QuotedString('"', escQuote='""') print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) prints:: [['This is the quote']] [['This is the "quote"']] [['This is the quote with "embedded" quotes']] c sttj|j}|sGtjdtddtn|dkr\|}n4|j}|stjdtddtn|_ t |_ |d_ |_ t |_|_|_|_|_|rTtjtjB_dtjj tj d|dk rDt|pGdf_nPd_dtjj tj d|dk rt|pdf_t j d krjd d jfd tt j d dd Dd7_n|r*jdtj|7_n|rhjdtj|7_tjjd_njdtjj 7_y+tjjj_j_Wn4tj k rtjdjtddnXt!_"dj"_#t$_%t&_'dS(Ns$quoteChar cannot be the empty stringRis'endQuoteChar cannot be the empty stringis %s(?:[^%s%s]Rrs%s(?:[^%s\n\r%s]is|(?:s)|(?:c3s<|]2}dtjj| tj|fVqdS(s%s[^%s]N(R|RGt endQuoteCharRE(RR(R(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys / sit)s|(?:%s)s|(?:%s.)s(.)s)*%ss$invalid pattern (%s) passed to Regexs Expected ((R R#RRRRRt SyntaxErrorRt quoteCharRt quoteCharLentfirstQuoteCharRXtendQuoteCharLentescChartescQuotetunquoteResultstconvertWhitespaceEscapesR|t 
MULTILINEtDOTALLRRRGRERQRRtescCharReplacePatternRHRFRSRTRRRyRRxRRs(RR[R_R`t multilineRaRXRb((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR sf             ( %E  c CsT|||jkr(|jj||p+d}|sOt|||j|n|j}|j}|jrJ||j |j !}t |t rJd|kr|j ridd6dd6dd6dd 6}x/|jD]\}}|j||}qWn|jr tj|jd |}n|jrG|j|j|j}qGqJn||fS( Ns\s s\ts s\ns s\fs s\rs\g<1>(R]R|R%RRRyRLRMRaR\R^RsRRbRRR_RReR`RX( RRERRR!R}tws_maptwslittwschar((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRG s*.      !cCs]ytt|jSWntk r*nX|jdkrVd|j|jf|_n|jS(Ns.quoted string, starting with %s ending with %s(R R#RRaRmRR[RX(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRj s N( RRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR# sA #cBs5eZdZddddZedZdZRS(s Token for matching words composed of characters I{not} in a given set (will include whitespace in matched characters if not listed in the provided exclusion set - see example). Defined with string containing all disallowed characters, and an optional minimum, maximum, and/or exact length. The default value for C{min} is 1 (a minimum value < 1 is not valid); the default values for C{max} and C{exact} are 0, meaning no maximum or exact length restriction. 
Example:: # define a comma-separated-value as anything that is not a ',' csv_value = CharsNotIn(',') print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) prints:: ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] iicCstt|jt|_||_|dkr@tdn||_|dkra||_n t |_|dkr||_||_nt ||_ d|j |_ |jdk|_ t|_dS(Nisfcannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permittedis Expected (R RRRRptnotCharsRRBRCR$RRRyRsRx(RRjRIRJRK((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s           cCs|||jkr.t|||j|n|}|d7}|j}t||jt|}x*||kr|||kr|d7}qfW|||jkrt|||j|n||||!fS(Ni(RjRRyRIRCRRB(RRERRR5tnotcharstmaxlen((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s  cCsytt|jSWntk r*nX|jdkryt|jdkrfd|jd |_qyd|j|_n|jS(Nis !W:(%s...)s!W:(%s)(R RRRaRmRRRj(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s (RRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRv s cBsXeZdZidd6dd6dd6dd6d d 6Zd d d d dZedZRS(s Special matching class for matching whitespace. Normally, whitespace is ignored by pyparsing grammars. This class is included when some whitespace structures are significant. Define with a string containing the whitespace characters to be matched; default is C{" \t\r\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments, as defined for the C{L{Word}} class. 
sRss ss ss ss s iicsttj|_jdjfdjDdjdjD_t_ dj_ |_ |dkr|_ n t _ |dkr|_ |_ ndS(NRrc3s$|]}|jkr|VqdS(N(t matchWhite(RR(R(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys scss|]}tj|VqdS(N(R,t whiteStrs(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys ss Expected i(R R,RRmRRRqRRRsRyRBRCR$(RtwsRIRJRK((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s )       cCs|||jkr.t|||j|n|}|d7}||j}t|t|}x-||kr|||jkr|d7}qcW|||jkrt|||j|n||||!fS(Ni(RmRRyRCRIRRB(RRERRR5R6((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s  "(RRRRnRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR, s t_PositionTokencBseZdZRS(cCs8tt|j|jj|_t|_t|_ dS(N( R RpRR^RRRRsRRx(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s (RRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRp scBs,eZdZdZdZedZRS(sb Token to advance to a specific column of input text; useful for tabular report scraping. cCs tt|j||_dS(N(R RRR7(Rtcolno((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR scCst|||jkrt|}|jrB|j||}nxE||kr||jrt|||jkr|d7}qEWn|S(Ni(R7RRuRtisspace(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s  7cCs^t||}||jkr6t||d|n||j|}|||!}||fS(NsText not in expected column(R7R(RRERRtthiscoltnewlocR}((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s  (RRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s  cBs#eZdZdZedZRS(s Matches if current position is at the beginning of a line within the parse string Example:: test = ''' AAA this line AAA and this line AAA but not this one B AAA and definitely not this one ''' for t in (LineStart() + 'AAA' + restOfLine).searchString(test): print(t) Prints:: ['AAA', ' this line'] ['AAA', ' and this line'] cCs tt|jd|_dS(NsExpected start of line(R RRRy(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR& scCs;t||dkr|gfSt|||j|dS(Ni(R7RRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR* s 
(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s cBs#eZdZdZedZRS(sU Matches if current position is at the end of a line within the parse string cCs<tt|j|jtjjddd|_dS(Ns RrsExpected end of line(R RRRR"RfRRy(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR3 scCs|t|krK||dkr0|ddfSt|||j|n8|t|krk|dgfSt|||j|dS(Ns i(RRRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR8 s(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR/ s cBs#eZdZdZedZRS(sM Matches if current position is at the beginning of the parse string cCs tt|jd|_dS(NsExpected start of text(R R(RRy(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRG scCsL|dkrB||j|dkrBt|||j|qBn|gfS(Ni(RRRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRK s (RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR(C s cBs#eZdZdZedZRS(sG Matches if current position is at the end of the parse string cCs tt|jd|_dS(NsExpected end of text(R R'RRy(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRV scCs|t|kr-t|||j|nT|t|krM|dgfS|t|kri|gfSt|||j|dS(Ni(RRRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRZ s (RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR'R s cBs&eZdZedZedZRS(sp Matches if the current position is at the beginning of a Word, and is not preceded by any character in a given set of C{wordChars} (default=C{printables}). To emulate the C{} behavior of regular expressions, use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of the string being parsed, or at the beginning of a line. 
cCs/tt|jt||_d|_dS(NsNot at the start of a word(R R/RRt wordCharsRy(RRu((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRl scCs^|dkrT||d|jks6|||jkrTt|||j|qTn|gfS(Nii(RuRRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRq s  (RRRRTRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR/d s cBs&eZdZedZedZRS(sZ Matches if the current position is at the end of a Word, and is not followed by any character in a given set of C{wordChars} (default=C{printables}). To emulate the C{} behavior of regular expressions, use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of the string being parsed, or at the end of a line. cCs8tt|jt||_t|_d|_dS(NsNot at the end of a word(R R.RRRuRRpRy(RRu((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s cCsvt|}|dkrl||krl|||jksN||d|jkrlt|||j|qln|gfS(Nii(RRuRRy(RRERRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s  (RRRRTRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR.x s cBsqeZdZedZdZdZdZdZdZ dZ edZ gd Z d Z RS( s^ Abstract subclass of ParserElement, for combining and post-processing parsed tokens. 
cCstt|j|t|tr4t|}nt|tr[tj|g|_ nt|t j rt|}t d|Drt tj|}nt||_ n3yt||_ Wntk r|g|_ nXt|_dS(Ncss|]}t|tVqdS(N(RsR(RRF((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys s(R RRRsRRRR"RitexprsRtIterabletallRRRR}(RRvR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s  cCs |j|S(N(Rv(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR scCs|jj|d|_|S(N(RvRRRm(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s cCsPt|_g|jD]}|j^q|_x|jD]}|jq8W|S(s~Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on all contained expressions.(RRpRvRR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s  %cCst|trb||jkrtt|j|x(|jD]}|j|jdq>Wqn>tt|j|x%|jD]}|j|jdqW|S(Ni(RsR)RuR RRRv(RR R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR scCsfytt|jSWntk r*nX|jdkr_d|jjt|j f|_n|jS(Ns%s:(%s)( R RRRaRmRR^RRRv(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s %cCswtt|jx|jD]}|jqWt|jdkr`|jd}t||jr|j r|jdkr|j r|j|jdg|_d|_ |j |j O_ |j |j O_ n|jd}t||jr`|j r`|jdkr`|j r`|jd |j|_d|_ |j |j O_ |j |j O_ q`ndt||_|S(Niiiis Expected (R RRRvRRsR^RkRnRRvRmRsRxRRy(RRR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s0        cCstt|j||}|S(N(R RR(RRRR}((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR scCs@||g}x|jD]}|j|qW|jgdS(N(RvRR(RRttmpR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR scCs>tt|j}g|jD]}|j^q|_|S(N(R RRRv(RR}R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s%(RRRRRRRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s    "  cBsWeZdZdefdYZedZedZdZdZ dZ RS(s  Requires all given C{ParseExpression}s to be found in the given order. Expressions may be separated by whitespace. May be constructed using the C{'+'} operator. May also be constructed using the C{'-'} operator, which will suppress backtracking. 
Example:: integer = Word(nums) name_expr = OneOrMore(Word(alphas)) expr = And([integer("id"),name_expr("name"),integer("age")]) # more easily written as: expr = integer("id") + name_expr("name") + integer("age") RcBseZdZRS(cOs3ttj|j||d|_|jdS(Nt-(R RRRRR(RRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s (RRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR scCsltt|j||td|jD|_|j|jdj|jdj|_t |_ dS(Ncss|]}|jVqdS(N(Rs(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys  si( R RRRxRvRsRRqRpRR}(RRvR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s c Cs?|jdj|||dt\}}t}x|jdD]}t|tjr`t}q<n|ry|j|||\}}Wqtk rqtk r}d|_ tj |qt k rt|t ||j|qXn|j|||\}}|s$|jr<||7}q<q<W||fS(NiRi(RvRRRsRRRR!RRt __traceback__RRRRyR( RRERRt resultlistt errorStopRt exprtokensR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s((   %cCs.t|tr!tj|}n|j|S(N(RsRR"RiR(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR5 scCs@||g}x+|jD] }|j||jsPqqWdS(N(RvRRs(RRtsubRecCheckListR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR: s   cCsVt|dr|jS|jdkrOddjd|jDd|_n|jS(NRt{Rcss|]}t|VqdS(N(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys F st}(RRRmRRRv(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRA s *( RRRR RRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s    cBsAeZdZedZedZdZdZdZ RS(s Requires that at least one C{ParseExpression} is found. If two expressions match, the expression that matches the longest string will be used. May be constructed using the C{'^'} operator. Example:: # construct Or using '^' operator number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) print(number.searchString("123 3.1416 789")) prints:: [['123'], ['3.1416'], ['789']] cCsNtt|j|||jrAtd|jD|_n t|_dS(Ncss|]}|jVqdS(N(Rs(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys \ s(R RRRvR4RsR(RRvR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRY s c Csd}d}g}x|jD]}y|j||}Wntk rw} d| _| j|kr| }| j}qqtk rt||krt|t||j|}t|}qqX|j ||fqW|rh|j ddxn|D]c\} }y|j |||SWqtk r`} d| _| j|kra| }| j}qaqXqWn|dk r|j|_ |nt||d|dS(NiRcSs |d S(Ni((tx((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqu Rrs no defined alternatives to match( RRvRRR{RRRRyRtsortRR( RRERRt maxExcLoct maxExceptionRRtloc2Rt_((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR` s<      cCs.t|tr!tj|}n|j|S(N(RsRR"RiR(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__ixor__ scCsVt|dr|jS|jdkrOddjd|jDd|_n|jS(NRRs ^ css|]}t|VqdS(N(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys sR(RRRmRRRv(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s *cCs3||g}x|jD]}|j|qWdS(N(RvR(RRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s( RRRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRK s    &  cBsAeZdZedZedZdZdZdZ RS(s Requires that at least one C{ParseExpression} is found. If two expressions match, the first one listed is the one that will match. May be constructed using the C{'|'} operator. Example:: # construct MatchFirst using '|' operator # watch the order of expressions to match number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] # put more selective expression first number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] cCsNtt|j|||jrAtd|jD|_n t|_dS(Ncss|]}|jVqdS(N(Rs(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys s(R RRRvR4RsR(RRvR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s c Csd}d}x|jD]}y|j|||}|SWqtk ro}|j|kr|}|j}qqtk rt||krt|t||j|}t|}qqXqW|dk r|j|_|nt||d|dS(Nis no defined alternatives to match( RRvRRRRRRyR( RRERRRRRR}R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s$    cCs.t|tr!tj|}n|j|S(N(RsRR"RiR(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__ior__ scCsVt|dr|jS|jdkrOddjd|jDd|_n|jS(NRRs | css|]}t|VqdS(N(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys sR(RRRmRRRv(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s *cCs3||g}x|jD]}|j|qWdS(N(RvR(RRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s( RRRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s     cBs8eZdZedZedZdZdZRS(sm Requires all given C{ParseExpression}s to be found, but in any order. Expressions may be separated by whitespace. May be constructed using the C{'&'} operator. 
Example:: color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") integer = Word(nums) shape_attr = "shape:" + shape_type("shape") posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") color_attr = "color:" + color("color") size_attr = "size:" + integer("size") # use Each (using operator '&') to accept attributes in any order # (shape and posn are required, color and size are optional) shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) shape_spec.runTests(''' shape: SQUARE color: BLACK posn: 100, 120 shape: CIRCLE size: 50 color: BLUE posn: 50,80 color:GREEN size:20 shape:TRIANGLE posn:20,40 ''' ) prints:: shape: SQUARE color: BLACK posn: 100, 120 ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - color: BLACK - posn: ['100', ',', '120'] - x: 100 - y: 120 - shape: SQUARE shape: CIRCLE size: 50 color: BLUE posn: 50,80 ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - color: BLUE - posn: ['50', ',', '80'] - x: 50 - y: 80 - shape: CIRCLE - size: 50 color: GREEN size: 20 shape: TRIANGLE posn: 20,40 ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - color: GREEN - posn: ['20', ',', '40'] - x: 20 - y: 40 - shape: TRIANGLE - size: 20 cCsKtt|j||td|jD|_t|_t|_dS(Ncss|]}|jVqdS(N(Rs(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys s( R R RRxRvRsRRptinitExprGroups(RRvR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs cCs4|jrLtd|jD|_g|jD]}t|tr/|j^q/}g|jD]%}|jr]t|t r]|^q]}|||_g|jD]}t|t r|j^q|_ g|jD]}t|t r|j^q|_ g|jD]$}t|tt t fs|^q|_ |j |j 7_ t|_n|}|j }|j} g} t} x| r_|| |j |j } g} x| D]}y|j||}Wntk r| j|qX| j|jjt||||kr|j|q|| kr| j|qqWt| t| krut} ququW|rdjd|D}t||d|n| g|jD]*}t|tr|j| kr|^q7} g}x6| D].}|j|||\}}|j|qWt|tg}||fS(Ncss3|])}t|trt|j|fVqdS(N(RsRRRF(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys ss, 
css|]}t|VqdS(N(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys =ss*Missing one or more required elements (%s)(RRRvtopt1mapRsRRFRst optionalsR0tmultioptionalsRt multirequiredtrequiredRRRRRRRtremoveRRRtsumR (RRERRRtopt1topt2ttmpLocttmpReqdttmpOptt matchOrdert keepMatchingttmpExprstfailedtmissingR|R;t finalResults((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRsP .5 117      "   > cCsVt|dr|jS|jdkrOddjd|jDd|_n|jS(NRRs & css|]}t|VqdS(N(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys PsR(RRRmRRRv(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRKs *cCs3||g}x|jD]}|j|qWdS(N(RvR(RRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRTs(RRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s 5  1 cBs_eZdZedZedZdZdZdZ dZ gdZ dZ RS( sa Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens. cCstt|j|t|trattjtrItj|}qatjt |}n||_ d|_ |dk r|j |_ |j|_|j|j|j|_|j|_|j|_|jj|jndS(N(R RRRsRt issubclassR"RiR*RRFRRmRxRsRRqRpRoR}RuR(RRFR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR^s        cCsG|jdk r+|jj|||dtStd||j|dS(NRRr(RFRRRRRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRpscCs>t|_|jj|_|jdk r:|jjn|S(N(RRpRFRRR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRvs  cCst|trc||jkrtt|j||jdk r`|jj|jdq`qn?tt|j||jdk r|jj|jdn|S(Ni(RsR)RuR RRRFR(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR}s cCs6tt|j|jdk r2|jjn|S(N(R RRRFR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCsV||kr"t||gn||g}|jdk rR|jj|ndS(N(R$RFRR(RRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs  cCsA||g}|jdk r0|jj|n|jgdS(N(RFRRR(RRRy((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCsuytt|jSWntk r*nX|jdkrn|jdk rnd|jjt |jf|_n|jS(Ns%s:(%s)( R RRRaRmRRFR^RR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs %( 
RRRRRRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRZs      cBs#eZdZdZedZRS(s Lookahead matching of the given parse expression. C{FollowedBy} does I{not} advance the parsing position within the input string, it only verifies that the specified parse expression matches at the current position. C{FollowedBy} always returns a null token list. Example:: # use FollowedBy to match a label only if it is followed by a ':' data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() prints:: [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] cCs#tt|j|t|_dS(N(R R RRRs(RRF((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCs|jj|||gfS(N(RFR(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s cBs,eZdZdZedZdZRS(s Lookahead to disallow matching with the given parse expression. C{NotAny} does I{not} advance the parsing position within the input string, it only verifies that the specified parse expression does I{not} match at the current position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny} always returns a null token list. May be constructed using the '~' operator. 
Example:: cCsBtt|j|t|_t|_dt|j|_ dS(NsFound unwanted token, ( R RRRRpRRsRRFRy(RRF((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs  cCs:|jj||r0t|||j|n|gfS(N(RFRRRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCsIt|dr|jS|jdkrBdt|jd|_n|jS(NRs~{R(RRRmRRRF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs (RRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs   t_MultipleMatchcBs eZddZedZRS(cCsftt|j|t|_|}t|trFtj|}n|dk rY|nd|_ dS(N( R RRRRoRsRR"RiRt not_ender(RRFtstopOntender((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs  c Cs|jj}|j}|jdk }|r9|jj}n|rO|||n||||dt\}}y|j } xo|r|||n| r|||} n|} ||| |\}} | s| jr~|| 7}q~q~WWnt t fk rnX||fS(NR( RFRRRRRRRuRRR( RRERRtself_expr_parsetself_skip_ignorablest check_endert try_not_enderRthasIgnoreExprsRt tmptokens((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs,   N(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs cBseZdZdZRS(s Repetition of one or more of the given expression. Parameters: - expr - expression that must match one or more times - stopOn - (default=C{None}) - expression for a terminating sentinel (only required if the sentinel would ordinarily match the repetition expression) Example:: data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) text = "shape: SQUARE posn: upper left color: BLACK" OneOrMore(attr_expr).parseString(text).pprint() # Fail! 
read 'color' as data instead of next label -> [['shape', 'SQUARE color']] # use stopOn attribute for OneOrMore to avoid reading label string as part of the data attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] # could also be written as (attr_expr * (1,)).parseString(text).pprint() cCsIt|dr|jS|jdkrBdt|jd|_n|jS(NRRs}...(RRRmRRRF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR!s (RRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscBs/eZdZddZedZdZRS(sw Optional repetition of zero or more of the given expression. Parameters: - expr - expression that must match zero or more times - stopOn - (default=C{None}) - expression for a terminating sentinel (only required if the sentinel would ordinarily match the repetition expression) Example: similar to L{OneOrMore} cCs)tt|j|d|t|_dS(NR(R R0RRRs(RRFR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR6scCsEy tt|j|||SWnttfk r@|gfSXdS(N(R R0RRR(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR:s cCsIt|dr|jS|jdkrBdt|jd|_n|jS(NRRs]...(RRRmRRRF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR@s N(RRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR0*s   t _NullTokencBs eZdZeZdZRS(cCstS(N(R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRJscCsdS(NRr((R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRMs(RRRR>R(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRIs cBs/eZdZedZedZdZRS(sa Optional matching of the given expression. Parameters: - expr - expression that must match zero or more times - default (optional) - value to be returned if the optional expression is not found. 
Example:: # US postal code can be a 5-digit zip, plus optional 4-digit qualifier zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) zip.runTests(''' # traditional ZIP code 12345 # ZIP+4 form 12101-0001 # invalid ZIP 98765- ''') prints:: # traditional ZIP code 12345 ['12345'] # ZIP+4 form 12101-0001 ['12101-0001'] # invalid ZIP 98765- ^ FAIL: Expected end of text (at char 5), (line:1, col:6) cCsAtt|j|dt|jj|_||_t|_dS(NR( R RRRRFRoRRRs(RRFR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRts cCsy(|jj|||dt\}}Wnottfk r|jtk r|jjrt|jg}|j||jj ['3', '.', '1416'] # will also erroneously match the following print(real.parseString('3. 1416')) # -> ['3', '.', '1416'] real = Combine(Word(nums) + '.' + Word(nums)) print(real.parseString('3.1416')) # -> ['3.1416'] # no match when there are internal spaces print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) RrcCsQtt|j||r)|jn||_t|_||_t|_dS(N( R RRRtadjacentRRpt joinStringR}(RRFRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRrs    cCs6|jrtj||ntt|j||S(N(RR"RR R(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR|s cCse|j}|2|tdj|j|jgd|j7}|jr]|jr]|gS|SdS(NRrR(RR RRRRzRnR(RRERRtretToks((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs  1(RRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRas cBs eZdZdZdZRS(s Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions. 
Example:: ident = Word(alphas) num = Word(nums) term = ident | num func = ident + Optional(delimitedList(term)) print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100'] func = ident + Group(Optional(delimitedList(term))) print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']] cCs#tt|j|t|_dS(N(R RRRRo(RRF((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCs|gS(N((RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs(RRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs  cBs eZdZdZdZRS(sW Converter to return a repetitive expression as a list, but also as a dictionary. Each element can also be referenced using the first token in the expression as its key. Useful for tabular report scraping when the first column can be used as a item key. Example:: data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) text = "shape: SQUARE posn: upper left color: light blue texture: burlap" attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) # print attributes as plain groups print(OneOrMore(attr_expr).parseString(text).dump()) # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names result = Dict(OneOrMore(Group(attr_expr))).parseString(text) print(result.dump()) # access named fields as dict entries, or output as dict print(result['shape']) print(result.asDict()) prints:: ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - color: light blue - posn: upper left - shape: SQUARE - texture: burlap SQUARE {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} See more examples at L{ParseResults} of accessing fields by results name. 
cCs#tt|j|t|_dS(N(R R RRRo(RRF((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCsTx9t|D]+\}}t|dkr1q n|d}t|trct|dj}nt|dkrtd|||nX|S(ss Decorator for debugging parse actions. When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised. Example:: wd = Word(alphas) @traceParseAction def remove_duplicate_chars(tokens): return ''.join(sorted(set(''.join(tokens))) wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) print(wds.parseString("slkdjs sld sldd sdlf sdljf")) prints:: >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) <>entering %s(line: '%s', %d, %r) s< ['aa', 'bb', 'cc'] delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] s [Rs]...N(RRR0RR)(RFtdelimtcombinetdlName((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR>9s ,!cstfd}|dkrBttjd}n |j}|jd|j|dt|jdt dS(s: Helper to define a counted list of expressions. This helper defines a pattern of the form:: integer expr expr expr... where the leading integer tells how many expr expressions follow. The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value. 
Example:: countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] # in this parser, the leading integer value is given in binary, # '10' indicating that 2 values are in the array binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] cs;|d}|r,ttg|p5tt>gS(Ni(RRRA(RRNRpR(t arrayExprRF(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytcountFieldParseAction_s -cSst|dS(Ni(Ro(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqdRrtarrayLenR~s(len) s...N( R RR-RPRzRRRRR(RFtintExprR((RRFs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR:Ls    cCsMg}x@|D]8}t|tr8|jt|q |j|q W|S(N(RsRRRR(tLR}R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRks  csFtfd}|j|dtjdt|S(s* Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousLiteral(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches a previous literal, will also match the leading C{"1:1"} in C{"1:10"}. If this is not desired, use C{matchPreviousExpr}. Do I{not} use with packrat parsing enabled. csc|rTt|dkr'|d>q_t|j}td|D>n t>dS(Niicss|]}t|VqdS(N(R(Rttt((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys s(RRRRR (RRNRpttflat(trep(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytcopyTokenToRepeaters R~s(prev) (R RRRR(RFR((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRMts  cs\t|j}|Kfd}|j|dtjdt|S(sS Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousExpr(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. 
Because this matches by expressions, will I{not} match the leading C{"1:1"} in C{"1:10"}; the expressions are evaluated first, and then compared, so C{"1"} is compared with C{"10"}. Do I{not} use with packrat parsing enabled. cs8t|jfd}j|dtdS(Ncs7t|j}|kr3tdddndS(NRri(RRR(RRNRpt theseTokens(t matchTokens(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytmustMatchTheseTokenss R~(RRRzR(RRNRpR(R(Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRsR~s(prev) (R RRRRR(RFte2R((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRLs   cCsUx$dD]}|j|t|}qW|jdd}|jdd}t|S(Ns\^-]s s\ns s\t(Rt_bslashR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyREs  c sD|r!d}d}tnd}d}tg}t|tr]|j}n7t|tjr~t|}ntj dt dd|st Sd}x|t |d krV||}xt ||d D]f\}} || |r |||d =Pq||| r|||d =|j|| | }PqqW|d 7}qW| r|ryt |t d j|krtd d jd |Djd j|Stdjd|Djd j|SWqtk rtj dt ddqXntfd|Djd j|S(s Helper to quickly define a set of alternative Literals, and makes sure to do longest-first testing when there is a conflict, regardless of the input order, but returns a C{L{MatchFirst}} for best performance. 
Parameters: - strs - a string of space-delimited literals, or a collection of string literals - caseless - (default=C{False}) - treat all literals as caseless - useRegex - (default=C{True}) - as an optimization, will generate a Regex object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or if creating a C{Regex} raises an exception) Example:: comp_oper = oneOf("< = > <= >= !=") var = Word(alphas) number = Word(nums) term = var | number comparison_expr = term + comp_oper + term print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) prints:: [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] cSs|j|jkS(N(R,(R tb((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrcSs|jj|jS(N(R,R)(R R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrcSs ||kS(N((R R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrcSs |j|S(N(R)(R R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrs6Invalid argument to oneOf, expected string or iterableRiiiRrs[%s]css|]}t|VqdS(N(RE(Rtsym((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys ss | t|css|]}tj|VqdS(N(R|RG(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys ss7Exception creating Regex for oneOf, building MatchFirstc3s|]}|VqdS(N((RR(tparseElementClass(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys s(RRRsRRRRwRRRRRRRRRR%RRaR( tstrsR+tuseRegextisequaltmaskstsymbolsRtcurRR ((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRQsL        ! !33  cCsttt||S(s Helper to easily and clearly define a dictionary by specifying the respective patterns for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens in the proper order. The key pattern can include delimiting markers or punctuation, as long as they are suppressed, thereby leaving the significant key text. 
The value pattern can include named results, so that the C{Dict} results can include named token fields. Example:: text = "shape: SQUARE posn: upper left color: light blue texture: burlap" attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) print(OneOrMore(attr_expr).parseString(text).dump()) attr_label = label attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) # similar to Dict, but simpler call format result = dictOf(attr_label, attr_value).parseString(text) print(result.dump()) print(result['shape']) print(result.shape) # object attribute access works too print(result.asDict()) prints:: [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - color: light blue - posn: upper left - shape: SQUARE - texture: burlap SQUARE SQUARE {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} (R R0R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR?s!cCs|tjd}|j}t|_|d||d}|rVd}n d}|j||j|_|S(s Helper to return the original, untokenized text for a given expression. Useful to restore the parsed fields of an HTML start tag into the raw tag text itself, or to revert separate tokens with intervening whitespace back to the original matching input text. By default, returns astring containing the original parsed text. If the optional C{asString} argument is passed as C{False}, then the return value is a C{L{ParseResults}} containing any results names that were originally matched, and a single token containing the original matched text from the input string. So if the expression passed to C{L{originalTextFor}} contains expressions with defined results names, you must set C{asString} to C{False} if you want to preserve those results name values. 
Example:: src = "this is test bold text normal text " for tag in ("b","i"): opener,closer = makeHTMLTags(tag) patt = originalTextFor(opener + SkipTo(closer) + closer) print(patt.searchString(src)[0]) prints:: [' bold text '] ['text'] cSs|S(N((RRRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRq8Rrt_original_startt _original_endcSs||j|j!S(N(RR(RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRq=RrcSs'||jd|jd!g|(dS(NRR(R(RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt extractText?s(R RzRRR}Ru(RFtasStringt locMarkert endlocMarkert matchExprR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRe s      cCst|jdS(sp Helper to undo pyparsing's default grouping of And expressions, even if all but one are non-empty. cSs|dS(Ni((Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqJRr(R+Rz(RF((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRfEscCsEtjd}t|d|d|jjdS(s Helper to decorate a returned token with its starting and ending locations in the input string. 
This helper adds the following results names: - locn_start = location where matched expression begins - locn_end = location where matched expression ends - value = the actual parsed results Be careful if the input text contains C{} characters, you may want to call C{L{ParserElement.parseWithTabs}} Example:: wd = Word(alphas) for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): print(match) prints:: [[0, 'ljsdf', 5]] [[8, 'lksdjjf', 15]] [[18, 'lkkjj', 23]] cSs|S(N((RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRq`Rrt locn_startRtlocn_end(R RzRRR(RFtlocator((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRhLss\[]-*.$+^?()~ RKcCs |ddS(Nii((RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqkRrs\\0?[xX][0-9a-fA-F]+cCs tt|djddS(Nis\0xi(tunichrRotlstrip(RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqlRrs \\0[0-7]+cCstt|dddS(Niii(RRo(RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqmRrR<s\]s\wRzRRtnegatetbodyRcsOdy-djfdtj|jDSWntk rJdSXdS(s Helper to easily define string ranges for use in Word construction. Borrows syntax from regexp '[]' string range definitions:: srange("[0-9]") -> "0123456789" srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" The input string must be enclosed in []'s, and the returned string is the expanded character set joined into a single string. The values enclosed in the []'s may be: - a single character - an escaped character with a leading backslash (such as C{\-} or C{\]}) - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) (C{\0x##} is also supported for backwards compatibility) - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character) - a range of any of the above, separated by a dash (C{'a-z'}, etc.) - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.) 
cSsKt|ts|Sdjdtt|dt|ddDS(NRrcss|]}t|VqdS(N(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys sii(RsR RRtord(tp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrRrc3s|]}|VqdS(N((Rtpart(t _expanded(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys sN(Rt_reBracketExprRRRa(R((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR]rs  - csfd}|S(st Helper method for defining parse actions that require matching at a specific column in the input text. cs2t||kr.t||dndS(Nsmatched token not at column %d(R7R(R@tlocnRJ(R(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt verifyCols((RR((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRKscs fdS(s Helper method for common parse actions that simply return a literal value. Especially useful when used with C{L{transformString}()}. Example:: num = Word(nums).setParseAction(lambda toks: int(toks[0])) na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) term = na | num OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] csgS(N((RRNRp(treplStr(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRr((R((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRZs cCs|ddd!S(s Helper parse action for removing quotation marks from parsed quoted strings. 
Example:: # by default, quotation marks are included in parsed results quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] # use removeQuotes to strip quotation marks from parsed results quotedString.setParseAction(removeQuotes) quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] iii((RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRXs csafd}y"tdtdj}Wntk rSt}nX||_|S(sG Helper to define a parse action by mapping a function to all elements of a ParseResults list.If any additional args are passed, they are forwarded to the given function as additional arguments after the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the parsed data to an integer using base 16. Example (compare the last to example in L{ParserElement.transformString}:: hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) hex_ints.runTests(''' 00 11 22 aa FF 0a 0d 1a ''') upperword = Word(alphas).setParseAction(tokenMap(str.upper)) OneOrMore(upperword).runTests(''' my kingdom for a horse ''') wd = Word(alphas).setParseAction(tokenMap(str.title)) OneOrMore(wd).setParseAction(' '.join).runTests(''' now is the winter of our discontent made glorious summer by this sun of york ''') prints:: 00 11 22 aa FF 0a 0d 1a [0, 17, 34, 170, 255, 10, 13, 26] my kingdom for a horse ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] now is the winter of our discontent made glorious summer by this sun of york ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] cs g|D]}|^qS(N((RRNRpttokn(RRO(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRsRR^(R`RRaRu(RORRRd((RROs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRks    
cCst|jS(N(RR,(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrcCst|jS(N(Rtlower(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrcCs<t|tr+|}t|d| }n |j}tttd}|rtjj t }t d|dt t t|t d|tddtgjdj d t d }nd jd tD}tjj t t|B}t d|dt t t|j ttt d|tddtgjdj d t d }ttd|d }|jdd j|jddjjjd|}|jdd j|jddjjjd|}||_||_||fS(sRInternal helper to construct opening and closing tag expressions, given a tag nameR+s_-:Rttagt=t/RRAcSs|ddkS(NiR((RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrR Rrcss!|]}|dkr|VqdS(R N((RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys scSs|ddkS(NiR((RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrsRLs(RsRRRR-R2R1R<RRzRXR)R R0RRRRRRTRWR@Rt_LRttitleRRR(ttagStrtxmltresnamet tagAttrNamet tagAttrValuetopenTagtprintablesLessRAbracktcloseTag((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt _makeTagss" o{AA  cCs t|tS(s  Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values. Example:: text = 'More info at the pyparsing wiki page' # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple a,a_end = makeHTMLTags("A") link_expr = a + SkipTo(a_end)("link_text") + a_end for link in link_expr.searchString(text): # attributes in the tag (like "href" shown here) are also accessible as named results print(link.link_text, '->', link.href) prints:: pyparsing -> http://pyparsing.wikispaces.com (R R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRIscCs t|tS(s Helper to construct opening and closing tag expressions for XML, given a tag name. Matches tags only in the given upper/lower case. 
Example: similar to L{makeHTMLTags} (R R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRJscsT|r|n |jgD]\}}||f^q#fd}|S(s< Helper to create a validating parse action to be used with start tags created with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag with a required attribute value, to avoid false matches on common tags such as C{} or C{
}. Call C{withAttribute} with a series of attribute names and values. Specify the list of filter attributes names and values as: - keyword arguments, as in C{(align="right")}, or - as an explicit dict with C{**} operator, when an attribute name is also a Python reserved word, as in C{**{"class":"Customer", "align":"right"}} - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) For attribute names with a namespace prefix, you must use the second form. Attribute names are matched insensitive to upper/lower case. If just testing for C{class} (with or without a namespace), use C{L{withClass}}. To verify that the attribute exists, but without specifying a value, pass C{withAttribute.ANY_VALUE} as the value. Example:: html = '''
Some text
1 4 0 1 0
1,3 2,3 1,1
this has no type
''' div,div_end = makeHTMLTags("div") # only match div tag having a type attribute with value "grid" div_grid = div().setParseAction(withAttribute(type="grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) # construct a match with any div tag having a type attribute, regardless of the value div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 csx~D]v\}}||kr8t||d|n|tjkr|||krt||d||||fqqWdS(Nsno matching attribute s+attribute '%s' has value '%s', must be '%s'(RRct ANY_VALUE(RRNRtattrNamet attrValue(tattrs(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRRs   (R(RtattrDictRRR((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRcs 2  %cCs'|rd|nd}ti||6S(s Simplified version of C{L{withAttribute}} when matching on a div class - made difficult because C{class} is a reserved word in Python. Example:: html = '''
Some text
1 4 0 1 0
1,3 2,3 1,1
this <div> has no class
''' div,div_end = makeHTMLTags("div") div_grid = div().setParseAction(withClass("grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 s%s:classtclass(Rc(t classnamet namespacet classattr((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRi\s t(RYcCs<t}||||B}xt|D]\}}|d d \}} } } | dkrdd|nd|} | dkr|d kst|dkrtdn|\} }ntj| }| tjkr| dkr t||t |t |}q| dkrx|d k rQt|||t |t ||}qt||t |t |}q| dkrt|| |||t || |||}qtdn+| tj kr| dkr)t |t st |}nt|j|t ||}q| dkr|d k rpt|||t |t ||}qt||t |t |}q| dkrt|| |||t || |||}qtdn td | r |j| n||j| |BK}|}q(W||K}|S( s Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. Parse actions can also be attached to operator expressions. The generated parser will also recognize the use of parentheses to override operator precedences (see example below). Note: if you define a deep operator list, you may see performance issues when using infixNotation. See L{ParserElement.enablePackrat} for a mechanism to potentially improve your parser performance. 
Parameters: - baseExpr - expression representing the most basic element for the nested - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal; if numTerms is 3, opExpr is a tuple of two expressions, for the two operators separating the 3 terms - numTerms is the number of terms for this operator (must be 1, 2, or 3) - rightLeftAssoc is the indicator whether the operator is right or left associative, using the pyparsing-defined constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. - parseAction is the parse action to be associated with expressions matching this operator expression (the parse action tuple member may be omitted) - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) Example:: # simple example of four-function arithmetic with ints and variable names integer = pyparsing_common.signed_integer varname = pyparsing_common.identifier arith_expr = infixNotation(integer | varname, [ ('-', 1, opAssoc.RIGHT), (oneOf('* /'), 2, opAssoc.LEFT), (oneOf('+ -'), 2, opAssoc.LEFT), ]) arith_expr.runTests(''' 5+3*6 (5+3)*6 -2--11 ''', fullDump=False) prints:: 5+3*6 [[5, '+', [3, '*', 6]]] (5+3)*6 [[[5, '+', 3], '*', 6]] -2--11 [[['-', 2], '-', ['-', 11]]] iis%s terms %s%s termis@if numterms=3, opExpr must be a tuple or list of two expressionsis6operator must be unary (1), binary (2), or ternary (3)s2operator must indicate right or left associativityN(N(R RRRRRRRtLEFTR RRtRIGHTRsRRFRz(tbaseExprtopListtlpartrparR}tlastExprRtoperDeftopExprtaritytrightLeftAssocRttermNametopExpr1topExpr2tthisExprR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRgsR;    '  /'   $  /'     s4"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*t"s string enclosed 
in double quotess4'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*t's string enclosed in single quotess*quotedString using single or double quotestusunicode string literalcCs!||krtdn|d krt|trt|trt|dkrt|dkr|d k rtt|t||tj ddj d}q|t j t||tj j d}q|d k r9tt|t |t |ttj ddj d}qttt |t |ttj ddj d}qtdnt}|d k r|tt|t||B|Bt|K}n.|tt|t||Bt|K}|jd ||f|S( s~ Helper method for defining nested lists enclosed in opening and closing delimiters ("(" and ")" are the default). Parameters: - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression - content - expression for items within the nested lists (default=C{None}) - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) If an expression is not provided for the content argument, the nested expression will capture all whitespace-delimited content between delimiters as a list of separate values. Use the C{ignoreExpr} argument to define expressions that may contain opening or closing characters that should not be treated as opening or closing characters for nesting, such as quotedString or a comment expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. The default is L{quotedString}, but if no expressions are to be ignored, then pass C{None} for this argument. 
Example:: data_type = oneOf("void int short long char float double") decl_data_type = Combine(data_type + Optional(Word('*'))) ident = Word(alphas+'_', alphanums+'_') number = pyparsing_common.number arg = Group(decl_data_type + ident) LPAR,RPAR = map(Suppress, "()") code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) c_function = (decl_data_type("type") + ident("name") + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + code_body("body")) c_function.ignore(cStyleComment) source_code = ''' int is_odd(int x) { return (x%2); } int dec_to_hex(char hchar) { if (hchar >= '0' && hchar <= '9') { return (ord(hchar)-ord('0')); } else { return (10+ord(hchar)-ord('A')); } } ''' for func in c_function.searchString(source_code): print("%(name)s (%(type)s) args: %(args)s" % func) prints:: is_odd (int) args: [['int', 'x']] dec_to_hex (int) args: [['char', 'hchar']] s.opening and closing strings cannot be the sameiRKcSs|djS(Ni(R(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRq9RrcSs|djS(Ni(R(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRq<RrcSs|djS(Ni(R(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqBRrcSs|djS(Ni(R(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqFRrsOopening and closing arguments must be strings if no content expression is givensnested %s%s expressionN(RRRsRRRRRR"RfRzRARRR RR)R0R(topenertclosertcontentRR}((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRNs4:  $  $    5.c s5fd}fd}fd}ttjdj}ttj|jd}tj|jd}tj|jd} |rtt||t|t|t|| } n0tt|t|t|t|} |j t t| jdS( s Helper method for defining space-delimited indentation blocks, such as those used to define block statements in Python source code. 
Parameters: - blockStatementExpr - expression defining syntax of statement that is repeated within the indented block - indentStack - list created by caller to manage indentation stack (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - indent - boolean indicating whether block must be indented beyond the the current level; set to False for block of left-most statements (default=C{True}) A valid block must contain at least one C{blockStatement}. Example:: data = ''' def A(z): A1 B = 100 G = A2 A2 A3 B def BB(a,b,c): BB1 def BBA(): bba1 bba2 bba3 C D def spam(x,y): def eggs(z): pass ''' indentStack = [1] stmt = Forward() identifier = Word(alphas, alphanums) funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") func_body = indentedBlock(stmt, indentStack) funcDef = Group( funcDecl + func_body ) rvalue = Forward() funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") rvalue << (funcCall | identifier | Word(nums)) assignment = Group(identifier + "=" + rvalue) stmt << ( funcDef | assignment | identifier ) module_body = OneOrMore(stmt) parseTree = module_body.parseString(data) parseTree.pprint() prints:: [['def', 'A', ['(', 'z', ')'], ':', [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], 'B', ['def', 'BB', ['(', 'a', 'b', 'c', ')'], ':', [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], 'C', 'D', ['def', 'spam', ['(', 'x', 'y', ')'], ':', [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] css|t|krdSt||}|dkro|dkrZt||dnt||dndS(Nisillegal nestingsnot a peer entry(RR7RR(RRNRptcurCol(t indentStack(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytcheckPeerIndentscsEt||}|dkr/j|nt||ddS(Nisnot a subentry(R7RR(RRNRpR+(R,(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytcheckSubIndentscsn|t|krdSt||}oH|dkoH|dks`t||dnjdS(Niisnot an 
unindent(RR7RR(RRNRpR+(R,(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt checkUnindents &s tINDENTRrtUNINDENTsindented block( RRRRR RzRRRRR( tblockStatementExprR,R$R-R.R/R7R0tPEERtUNDENTtsmExpr((R,s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRdQsN"8 $s#[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]s[\0xa1-\0xbf\0xd7\0xf7]s_:sany tagsgt lt amp nbsp quot aposs><& "'s &(?PRs);scommon HTML entitycCstj|jS(sRHelper parser action to replace common HTML entities with their special characters(t_htmlEntityMapRtentity(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRYss/\*(?:[^*]|\*(?!/))*s*/sC style commentss HTML comments.*s rest of lines//(?:\\\n|[^\n])*s // commentsC++ style comments#.*sPython style comments t commaItemRcBseZdZeeZeeZee j dj eZ ee j dj eedZedj dj eZej edej ej dZejdeeeed jeBj d Zejeed j d j eZed j dj eZeeBeBjZedj dj eZeededj dZedj dZedj dZ e de dj dZ!ee de d8dee de d9j dZ"e"j#ddej d Z$e%e!e$Be"Bj d!j d!Z&ed"j d#Z'e(d$d%Z)e(d&d'Z*ed(j d)Z+ed*j d+Z,ed,j d-Z-e.je/jBZ0e(d.Z1e%e2e3d/e4ee5d0d/ee6d1jj d2Z7e8ee9j:e7Bd3d4j d5Z;e(ed6Z<e(ed7Z=RS(:s Here are some common low-level expressions that may be useful in jump-starting parser development: - numeric forms (L{integers}, L{reals}, L{scientific notation}) - common L{programming identifiers} - network addresses (L{MAC}, L{IPv4}, L{IPv6}) - ISO8601 L{dates} and L{datetime} - L{UUID} - L{comma-separated list} Parse actions: - C{L{convertToInteger}} - C{L{convertToFloat}} - C{L{convertToDate}} - C{L{convertToDatetime}} - C{L{stripHTMLTags}} - C{L{upcaseTokens}} - C{L{downcaseTokens}} Example:: pyparsing_common.number.runTests(''' # any int or real number, returned as the appropriate type 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.fnumber.runTests(''' # any int or real number, returned as float 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.hex_integer.runTests(''' # hex numbers 100 FF ''') pyparsing_common.fraction.runTests(''' # fractions 1/2 -3/4 ''') 
pyparsing_common.mixed_integer.runTests(''' # mixed fractions 1 1/2 -3/4 1-3/4 ''') import uuid pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) pyparsing_common.uuid.runTests(''' # uuid 12345678-1234-5678-1234-567812345678 ''') prints:: # any int or real number, returned as the appropriate type 100 [100] -100 [-100] +100 [100] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # any int or real number, returned as float 100 [100.0] -100 [-100.0] +100 [100.0] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # hex numbers 100 [256] FF [255] # fractions 1/2 [0.5] -3/4 [-0.75] # mixed fractions 1 [1] 1/2 [0.5] -3/4 [-0.75] 1-3/4 [1.75] # uuid 12345678-1234-5678-1234-567812345678 [UUID('12345678-1234-5678-1234-567812345678')] tintegers hex integeris[+-]?\d+ssigned integerRtfractioncCs|d|dS(Nii((Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrRzs"fraction or mixed integer-fractions [+-]?\d+\.\d*s real numbers+[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)s$real number with scientific notations[+-]?\d+\.?\d*([eE][+-]?\d+)?tfnumberRt identifiersK(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}s IPv4 addresss[0-9a-fA-F]{1,4}t hex_integerRisfull IPv6 addressiis::sshort IPv6 addresscCstd|DdkS(Ncss'|]}tjj|rdVqdS(iN(Rlt _ipv6_partR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys si(R(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrs::ffff:smixed IPv6 addresss IPv6 addresss:[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}s MAC addresss%Y-%m-%dcsfd}|S(s Helper to create a parse action for converting parsed date string to Python datetime.date Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) Example:: date_expr = pyparsing_common.iso8601_date.copy() date_expr.setParseAction(pyparsing_common.convertToDate()) print(date_expr.parseString("1999-12-31")) prints:: [datetime.date(1999, 12, 31)] csPytj|djSWn+tk 
rK}t||t|nXdS(Ni(RtstrptimetdateRRRu(RRNRptve(tfmt(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytcvt_fns((RBRC((RBs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt convertToDatess%Y-%m-%dT%H:%M:%S.%fcsfd}|S(s Helper to create a parse action for converting parsed datetime string to Python datetime.datetime Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) Example:: dt_expr = pyparsing_common.iso8601_datetime.copy() dt_expr.setParseAction(pyparsing_common.convertToDatetime()) print(dt_expr.parseString("1999-12-31T23:59:59.999")) prints:: [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] csJytj|dSWn+tk rE}t||t|nXdS(Ni(RR?RRRu(RRNRpRA(RB(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRCs((RBRC((RBs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytconvertToDatetimess7(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?s ISO8601 dates(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?sISO8601 datetimes2[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}tUUIDcCstjj|dS(s Parse action to remove HTML tags from web page HTML source Example:: # strip HTML links from normal text text = 'More info at the
pyparsing wiki page' td,td_end = makeHTMLTags("TD") table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' i(Rlt_html_stripperR{(RRNR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt stripHTMLTagss RR<s R8RRrscomma separated listcCst|jS(N(RR,(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrcCst|jS(N(RR(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRr(ii(ii(>RRRRkRotconvertToIntegertfloattconvertToFloatR-RPRRzR9RBR=R%tsigned_integerR:RRRt mixed_integerRtrealtsci_realRtnumberR;R2R1R<t ipv4_addressR>t_full_ipv6_addresst_short_ipv6_addressRt_mixed_ipv6_addressRt ipv6_addresst mac_addressR#RDREt iso8601_datetiso8601_datetimetuuidR5R4RGRHRRRRTR,t _commasepitemR>RWRtcomma_separated_listRbR@(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRlsL  '/-  ;&J+t__main__tselecttfroms_$RRtcolumnsRttablestcommandsK # '*' as column list and dotted table name select * from SYS.XYZZY # caseless match on "SELECT", and casts back to "select" SELECT * from XYZZY, ABC # list of column names, and mixed case SELECT keyword Select AA,BB,CC from Sys.dual # multiple tables Select A, B, C from Sys.dual, Table2 # invalid SELECT keyword - should fail Xelect A, B, C from Sys.dual # incomplete command - should fail Select # invalid column name - should fail Select ^^^ frox Sys.dual s] 100 -100 +100 3.14159 6.02e23 1e-12 s 100 FF s6 12345678-1234-5678-1234-567812345678 (Rt __version__t__versionTime__t __author__RtweakrefRRRRxRR|RSRR8RRRRt_threadRt ImportErrort threadingRRt ordereddictRt__all__Rt version_infoRQRtmaxsizeR$RuRtchrRRRRR2treversedRRR4RxRIRJR_tmaxinttxrangeRt __builtin__RtfnameRR`RRRRRRtascii_uppercasetascii_lowercaseR2RPRBR1RRt printableRTRaRRRR!R$RR tMutableMappingtregisterR7RHRERGRKRMROReR"R*R RRRRiRRRRjR-R%R#RR,RpRRRR(R'R/R.RRRRR RR RRRR0RRRR&R RR+RRR R)RR`RR>R:RRMRLRERRQR?ReRfRhRRARGRFR_R^Rzt 
_escapedPunct_escapedHexChart_escapedOctChartUNICODEt _singleChart _charRangeRRR]RKRZRXRkRbR@R RIRJRcR RiRRRRRgRSR<R\RWRaRNRdR3RUR5R4RRR6RR9RYR6RCRR[R=R;RDRVRRZR8RlRt selectTokent fromTokentidentt columnNametcolumnNameListt columnSpect tableNamet tableNameListt simpleSQLR"RPR;R=RYRF(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt=s              *         8      @ & A=IG3pLOD|M &# @sQ,A ,    I # %  !4@    ,   ?  #   k%Z r  (, #8+    $     PKe[nڲFyFysix.pyonu[ abcA@@sKdZddlmZddlZddlZddlZddlZddlZdZdZ ej ddkZ ej ddkZ ej dd!dakZ e refZefZefZeZeZejZnefZeefZeejfZeZeZejjd r$edcZnVd efd YZ ye!e Wne"k rjedeZn XedgZ[ dZ#dZ$defdYZ%de%fdYZ&dej'fdYZ(de%fdYZ)defdYZ*e*e+Z,de(fdYZ-e)dddde)d d!d"d#d e)d$d!d!d%d$e)d&d'd"d(d&e)d)d'd*e)d+d!d"d,d+e)d-d.d.d/d-e)d0d.d.d-d0e)d1d'd"d2d1e)d3d'e rd4nd5d6e)d7d'd8e)d9d:d;d<e)ddde)d=d=d>e)d?d?d>e)d@d@d>e)d2d'd"d2d1e)dAd!d"dBdAe)dCd!d!dDdCe&d"d'e&dEdFe&dGdHe&dIdJdKe&dLdMdLe&dNdOdPe&dQdRdSe&dTdUdVe&dWdXdYe&dZd[d\e&d]d^d_e&d`dadbe&dcdddee&dfdgdhe&dididje&dkdkdje&dldldje&dmdmdne&dodpe&dqdre&dsdte&dudvdue&dwdxe&dydzd{e&d|d}d~e&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddd~e&ddde&ddde&ddde&de+dde&de+dde&de+de+de&ddde&ddde&dddg>Z.ejdkr;e.e&ddg7Z.nxJe.D]BZ/e0e-e/j1e/e2e/e&rBe,j3e/de/j1qBqBW[/e.e-_.e-e+dZ4e,j3e4dde(fdYZ5e)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)d<dde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)dddgZ6x!e6D]Z/e0e5e/j1e/q0W[/e6e5_.e,j3e5e+dddde(fdYZ7e)ddde)ddde)dddgZ8x!e8D]Z/e0e7e/j1e/qW[/e8e7_.e,j3e7e+dddde(fdYZ9e)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)dddg!Z:x!e:D]Z/e0e9e/j1e/q W[/e:e9_.e,j3e9e+dddde(fdYZ;e)ddde)ddde)ddde)dddgZ<x!e<D]Z/e0e;e/j1e/q W[/e<e;_.e,j3e;e+d d d d e(fd YZ=e)dddgZ>x!e>D]Z/e0e=e/j1e/q; W[/e>e=_.e,j3e=e+ddddej'fdYZ?e,j3e?e+dddZ@dZAe r dZBdZCdZDdZEdZFdZGn$dZBdZCdZDd ZEd!ZFd"ZGy eHZIWneJk r= d#ZInXeIZHy eKZKWneJk rj d$ZKnXe r 
d%ZLejMZNd&ZOeZPn7d'ZLd(ZNd)ZOd*efd+YZPeKZKe#eLd,ejQeBZRejQeCZSejQeDZTejQeEZUejQeFZVejQeGZWe rd-ZXd.ZYd/ZZd0Z[ej\d1Z]ej\d2Z^ej\d3Z_nQd4ZXd5ZYd6ZZd7Z[ej\d8Z]ej\d9Z^ej\d:Z_e#eXd;e#eYd<e#eZd=e#e[d>e rd?Z`d@ZaebZcddldZdedjedAjfZg[dejhdZiejjZkelZmddlnZnenjoZoenjpZpdBZqej d d krdCZrdDZsq4dEZrdFZsnpdGZ`dHZaecZcebZgdIZidJZkejtejuevZmddloZoeojoZoZpdKZqdCZrdDZse#e`dLe#eadMdNZwdOZxdPZye reze4j{dQZ|ddRZ~ndddSZ|e|dTej d dhkre|dUn)ej d dikre|dVn dWZeze4j{dXdZedkrdYZnej d djkrDeZdZZne#e~d[ej dd!dkkrejejd\Zn ejZd]Zd^Zd_ZgZe+Zejd`dk rge_nejr7xOeejD]>\ZZeej+dkrej1e+kreje=PqqW[[nejje,dS(ls6Utilities for writing code that runs on Python 2 and 3i(tabsolute_importNs'Benjamin Peterson s1.10.0iiitjavaiitXcB@seZdZRS(cC@sdS(NiiI((tself((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt__len__>s(t__name__t __module__R(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR<si?cC@s ||_dS(s Add documentation to a function.N(t__doc__(tfunctdoc((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt_add_docKscC@st|tj|S(s7Import module, returning the module after the last dot.(t __import__tsystmodules(tname((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt_import_modulePs t _LazyDescrcB@seZdZdZRS(cC@s ||_dS(N(R(RR((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt__init__XscC@sN|j}t||j|yt|j|jWntk rInX|S(N(t_resolvetsetattrRtdelattrt __class__tAttributeError(Rtobjttptresult((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt__get__[s  (RRRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRVs t MovedModulecB@s&eZddZdZdZRS(cC@sJtt|j|tr=|dkr1|}n||_n ||_dS(N(tsuperRRtPY3tNonetmod(RRtoldtnew((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRis    cC@s t|jS(N(RR(R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRrscC@s/|j}t||}t||||S(N(RtgetattrR(Rtattrt_moduletvalue((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt __getattr__us N(RRRRRR&(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRgs t _LazyModulecB@s 
eZdZdZgZRS(cC@s)tt|j||jj|_dS(N(RR'RRR(RR((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR~scC@s3ddg}|g|jD]}|j^q7}|S(NRR(t_moved_attributesR(RtattrsR#((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt__dir__s #(RRRR*R((((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR'|s  tMovedAttributecB@s eZdddZdZRS(cC@stt|j|trp|dkr1|}n||_|dkrd|dkr[|}qd|}n||_n'||_|dkr|}n||_dS(N(RR+RRRRR#(RRtold_modtnew_modtold_attrtnew_attr((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRs           cC@st|j}t||jS(N(RRR"R#(Rtmodule((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRsN(RRRRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR+st_SixMetaPathImportercB@s_eZdZdZdZdZd dZdZdZ dZ dZ e Z RS( s A meta path importer to import six.moves and its submodules. This class implements a PEP302 finder and loader. It should be compatible with Python 2.5 and all existing versions of Python3 cC@s||_i|_dS(N(Rt known_modules(Rtsix_module_name((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRs cG@s-x&|D]}||j|jd|(RR6((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt is_packagescC@s|j|dS(s;Return None Required, if is_package is implementedN(R>R(RR6((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytget_codes N( RRRRR7R8RR:R>RARDREt get_source(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR1s       t _MovedItemscB@seZdZgZRS(sLazy loading of moved objects(RRRRB(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRGst cStringIOtiotStringIOtfiltert itertoolstbuiltinstifiltert filterfalset ifilterfalsetinputt __builtin__t raw_inputtinternR tmaptimaptgetcwdtostgetcwdutgetcwdbtrangetxranget reload_modulet importlibtimptreloadtreducet functoolst shlex_quotetpipestshlextquotetUserDictt collectionstUserListt UserStringtziptizipt zip_longestt izip_longestt configparsert ConfigParsertcopyregtcopy_regtdbm_gnutgdbmsdbm.gnut _dummy_threadt dummy_threadthttp_cookiejart cookielibshttp.cookiejart http_cookiestCookies http.cookiest html_entitiesthtmlentitydefss 
html.entitiest html_parsert HTMLParsers html.parsert http_clientthttplibs http.clienttemail_mime_multipartsemail.MIMEMultipartsemail.mime.multiparttemail_mime_nonmultipartsemail.MIMENonMultipartsemail.mime.nonmultiparttemail_mime_textsemail.MIMETextsemail.mime.texttemail_mime_basesemail.MIMEBasesemail.mime.basetBaseHTTPServers http.servert CGIHTTPServertSimpleHTTPServertcPickletpickletqueuetQueuetreprlibtreprt socketservert SocketServert_threadtthreadttkintertTkinterttkinter_dialogtDialogstkinter.dialogttkinter_filedialogt FileDialogstkinter.filedialogttkinter_scrolledtextt ScrolledTextstkinter.scrolledtextttkinter_simpledialogt SimpleDialogstkinter.simpledialogt tkinter_tixtTixs tkinter.tixt tkinter_ttktttks tkinter.ttkttkinter_constantst Tkconstantsstkinter.constantst tkinter_dndtTkdnds tkinter.dndttkinter_colorchooserttkColorChooserstkinter.colorchooserttkinter_commondialogttkCommonDialogstkinter.commondialogttkinter_tkfiledialogt tkFileDialogt tkinter_fontttkFonts tkinter.fontttkinter_messageboxt tkMessageBoxstkinter.messageboxttkinter_tksimpledialogttkSimpleDialogt urllib_parses.moves.urllib_parses urllib.parset urllib_errors.moves.urllib_errors urllib.errorturllibs .moves.urllibturllib_robotparsert robotparsersurllib.robotparsert xmlrpc_clientt xmlrpclibs xmlrpc.clientt xmlrpc_servertSimpleXMLRPCServers xmlrpc.servertwin32twinregt_winregsmoves.s.movestmovestModule_six_moves_urllib_parsecB@seZdZRS(s7Lazy loading of moved objects in six.moves.urllib_parse(RRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR@st ParseResultturlparset SplitResulttparse_qst parse_qslt urldefragturljointurlsplitt urlunparset urlunsplitt quote_plustunquotet unquote_plust urlencodet splitquerytsplittagt splitusert uses_fragmentt uses_netloct uses_paramst uses_queryt uses_relativesmoves.urllib_parsesmoves.urllib.parsetModule_six_moves_urllib_errorcB@seZdZRS(s7Lazy loading of moved objects in 
six.moves.urllib_error(RRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRhstURLErrorturllib2t HTTPErrortContentTooShortErrors.moves.urllib.errorsmoves.urllib_errorsmoves.urllib.errortModule_six_moves_urllib_requestcB@seZdZRS(s9Lazy loading of moved objects in six.moves.urllib_request(RRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR|sturlopensurllib.requesttinstall_openert build_openert pathname2urlt url2pathnamet getproxiestRequesttOpenerDirectortHTTPDefaultErrorHandlertHTTPRedirectHandlertHTTPCookieProcessort ProxyHandlert BaseHandlertHTTPPasswordMgrtHTTPPasswordMgrWithDefaultRealmtAbstractBasicAuthHandlertHTTPBasicAuthHandlertProxyBasicAuthHandlertAbstractDigestAuthHandlertHTTPDigestAuthHandlertProxyDigestAuthHandlert HTTPHandlert HTTPSHandlert FileHandlert FTPHandlertCacheFTPHandlertUnknownHandlertHTTPErrorProcessort urlretrievet urlcleanupt URLopenertFancyURLopenert proxy_bypasss.moves.urllib.requestsmoves.urllib_requestsmoves.urllib.requestt Module_six_moves_urllib_responsecB@seZdZRS(s:Lazy loading of moved objects in six.moves.urllib_response(RRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRstaddbasesurllib.responset addclosehooktaddinfot addinfourls.moves.urllib.responsesmoves.urllib_responsesmoves.urllib.responset#Module_six_moves_urllib_robotparsercB@seZdZRS(s=Lazy loading of moved objects in six.moves.urllib_robotparser(RRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRstRobotFileParsers.moves.urllib.robotparsersmoves.urllib_robotparsersmoves.urllib.robotparsertModule_six_moves_urllibcB@sheZdZgZejdZejdZejdZejdZ ejdZ dZ RS(sICreate a six.moves.urllib namespace that resembles the Python 3 namespacesmoves.urllib_parsesmoves.urllib_errorsmoves.urllib_requestsmoves.urllib_responsesmoves.urllib_robotparsercC@sdddddgS(NtparseterrortrequesttresponseR((R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR*s( RRRRBt _importerR8RRRRRR*(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRss 
moves.urllibcC@stt|j|dS(sAdd an item to six.moves.N(RRGR(tmove((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytadd_movescC@s^ytt|WnFtk rYytj|=WqZtk rUtd|fqZXnXdS(sRemove item from six.moves.sno such move, %rN(RRGRRt__dict__R;(R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt remove_moves  t__func__t__self__t __closure__t__code__t __defaults__t __globals__tim_functim_selft func_closuret func_codet func_defaultst func_globalscC@s |jS(N(tnext(tit((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytadvance_iterator scC@stdt|jDS(Ncs@s|]}d|jkVqdS(t__call__N(R (t.0tklass((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pys s(tanyttypet__mro__(R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytcallablescC@s|S(N((tunbound((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytget_unbound_functionscC@s|S(N((Rtcls((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytcreate_unbound_methodscC@s|jS(N(R(R"((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR#"scC@stj|||jS(N(ttypest MethodTypeR(RR((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytcreate_bound_method%scC@stj|d|S(N(R&R'R(RR$((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR%(stIteratorcB@seZdZRS(cC@st|j|S(N(Rt__next__(R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR-s(RRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR)+ss3Get the function out of a possibly unbound functioncK@st|j|S(N(titertkeys(tdtkw((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytiterkeys>scK@st|j|S(N(R+tvalues(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt itervaluesAscK@st|j|S(N(R+titems(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt iteritemsDscK@st|j|S(N(R+tlists(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt iterlistsGsR,R0R2cK@s |j|S(N(R/(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR/PscK@s |j|S(N(R1(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR1SscK@s 
|j|S(N(R3(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR3VscK@s |j|S(N(R5(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR5Ystviewkeyst viewvaluest viewitemss1Return an iterator over the keys of a dictionary.s3Return an iterator over the values of a dictionary.s?Return an iterator over the (key, value) pairs of a dictionary.sBReturn an iterator over the (key, [values]) pairs of a dictionary.cC@s |jdS(Nslatin-1(tencode(ts((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytbkscC@s|S(N((R:((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytunss>BtassertCountEqualtassertRaisesRegexptassertRegexpMatchestassertRaisesRegext assertRegexcC@s|S(N((R:((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR;scC@st|jdddS(Ns\\s\\\\tunicode_escape(tunicodetreplace(R:((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR<scC@st|dS(Ni(tord(tbs((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytbyte2intscC@st||S(N(RE(tbufti((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt indexbytesstassertItemsEquals Byte literals Text literalcO@st|t||S(N(R"t_assertCountEqual(Rtargstkwargs((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR=scO@st|t||S(N(R"t_assertRaisesRegex(RRMRN((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR@scO@st|t||S(N(R"t _assertRegex(RRMRN((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRAstexeccC@sC|dkr|}n|j|k r9|j|n|dS(N(Rt __traceback__twith_traceback(RR%ttb((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytreraises   cB@sc|dkrBejd}|j}|dkr<|j}n~n|dkrW|}nddUdS(sExecute code in a namespace.isexec _code_ in _globs_, _locs_N(RR t _getframet f_globalstf_locals(t_code_t_globs_t_locs_tframe((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytexec_s      s9def reraise(tp, value, tb=None): raise tp, value, tb srdef raise_from(value, from_value): if from_value is None: raise value raise value from from_value sCdef raise_from(value, from_value): raise value from from_value cC@s |dS(N((R%t 
from_value((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt raise_fromstprintc @s|jdtjdkr%dSfd}t}|jdd}|dk rt|trpt}qt|tst dqn|jdd}|dk rt|trt}qt|tst dqn|rt dn|s0x*|D]}t|tr t}Pq q Wn|rQtd }td }n d }d }|dkrr|}n|dkr|}nx7t |D])\} }| r||n||qW||dS( s4The new-style print function for Python 2.4 and 2.5.tfileNc@st|tst|}nttrt|trjdk rtdd}|dkrrd}n|jj|}nj |dS(Nterrorststrict( R?t basestringtstrRaRCtencodingRR"R9twrite(tdataRb(tfp(s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRgs  tsepssep must be None or a stringtendsend must be None or a strings$invalid keyword arguments to print()s t ( tpopR tstdoutRtFalseR?RCtTrueRet TypeErrort enumerate( RMRNRgt want_unicodeRjRktargtnewlinetspaceRI((Ris3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytprint_sL              cO@sW|jdtj}|jdt}t|||rS|dk rS|jndS(NRatflush(tgetR RnRmRot_printRRx(RMRNRiRx((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRw s  sReraise an exception.c@sfd}|S(Nc@s(tj|}|_|S(N(Rbtwrapst __wrapped__(tf(tassignedtupdatedtwrapped(s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytwrappers ((RR~RR((R~RRs3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR{sc@s5dffdY}tj|ddiS(s%Create a base class with a metaclass.t metaclassc@seZfdZRS(c@s||S(N((R$Rt this_basesR-(tbasestmeta(s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt__new__'s(RRR((RR(s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR%sttemporary_class((RR(RRR((RRs3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytwith_metaclass sc@sfd}|S(s6Class decorator for creating a class with a metaclass.c@s|jj}|jd}|dk rft|trE|g}nx|D]}|j|qLWn|jdd|jdd|j|j|S(Nt __slots__R t __weakref__( R tcopyRyRR?ReRmRt __bases__(R$t orig_varstslotst slots_var(R(s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR.s   ((RR((Rs3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt add_metaclass,s cC@sJtrFd|jkr+td|jn|j|_d|_n|S(s A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. 
To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. t__str__sY@python_2_unicode_compatible cannot be applied to %s because it doesn't define __str__().cS@s|jjdS(Nsutf-8(t __unicode__R9(R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytJt(tPY2R t ValueErrorRRR(R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytpython_2_unicode_compatible<s t__spec__(iiIiIill(ii(ii(ii(ii(Rt __future__RRbRLtoperatorR R&t __author__t __version__t version_infoRRtPY34Ret string_typestintt integer_typesRt class_typest text_typetbytest binary_typetmaxsizetMAXSIZERdtlongt ClassTypeRCtplatformt startswithtobjectRtlent OverflowErrorR RRRt ModuleTypeR'R+R1RRRGR(R#RRR?R7RRt_urllib_parse_moved_attributesRt_urllib_error_moved_attributesRt _urllib_request_moved_attributesRt!_urllib_response_moved_attributesRt$_urllib_robotparser_moved_attributesRR R t _meth_funct _meth_selft _func_closuret _func_codet_func_defaultst _func_globalsRRt NameErrorR!R#R'R(R%R)t attrgettertget_method_functiontget_method_selftget_function_closuretget_function_codetget_function_defaultstget_function_globalsR/R1R3R5t methodcallerR6R7R8R;R<tchrtunichrtstructtStructtpacktint2bytet itemgetterRGtgetitemRJR+t iterbytesRIRJtBytesIORLRORPtpartialRVRER=R@RAR"RMR]RRUR_RwRztWRAPPER_ASSIGNMENTStWRAPPER_UPDATESR{RRRRBt __package__tglobalsRyRtsubmodule_search_locationst meta_pathRrRItimportertappend(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyts               >                                                                                 5         PKe[pDDpackaging/utils.pyonu[ abc@`sDddlmZmZmZddlZejdZdZdS(i(tabsolute_importtdivisiontprint_functionNs[-_.]+cC`stjd|jS(Nt-(t_canonicalize_regextsubtlower(tname((s?/usr/lib/python2.7/site-packages/pip/_vendor/packaging/utils.pytcanonicalize_name s(t __future__RRRtretcompileRR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/packaging/utils.pyts PKe[packaging/_compat.pyonu[ 
abc@`svddlmZmZmZddlZejddkZejddkZer`efZ n e fZ dZ dS(i(tabsolute_importtdivisiontprint_functionNiic`s5dffdY}tj|ddiS(s/ Create a base class with a metaclass. t metaclassc`seZfdZRS(c`s||S(N((tclstnamet this_basestd(tbasestmeta(sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.pyt__new__s(t__name__t __module__R ((RR (sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.pyRsttemporary_class((ttypeR (R RR((RR sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.pytwith_metaclasss( t __future__RRRtsyst version_infotPY2tPY3tstrt string_typest basestringR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.pyts   PKe[BvD&``packaging/specifiers.pycnu[ abc@`s<ddlmZmZmZddlZddlZddlZddlZddlm Z m Z ddl m Z m Z mZdefdYZde ejefd YZd efd YZd efd YZdZdefdYZejdZdZdZdefdYZdS(i(tabsolute_importtdivisiontprint_functionNi(t string_typestwith_metaclass(tVersiont LegacyVersiontparsetInvalidSpecifiercB`seZdZRS(sH An invalid specifier was found, users should refer to PEP 440. (t__name__t __module__t__doc__(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRst BaseSpecifiercB`seZejdZejdZejdZejdZejdZ e j dZ ejddZ ejddZ RS( cC`sdS(s Returns the str representation of this Specifier like object. This should be representative of the Specifier itself. N((tself((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__str__tcC`sdS(sF Returns a hash value for this Specifier like object. N((R ((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__hash__RcC`sdS(sq Returns a boolean representing whether or not the two Specifier like objects are equal. N((R tother((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__eq__$RcC`sdS(su Returns a boolean representing whether or not the two Specifier like objects are not equal. 
N((R R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__ne__+RcC`sdS(sg Returns whether or not pre-releases as a whole are allowed by this specifier. N((R ((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt prereleases2RcC`sdS(sd Sets whether or not pre-releases as a whole are allowed by this specifier. N((R tvalue((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR9RcC`sdS(sR Determines if the given item is contained within this specifier. N((R titemR((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pytcontains@RcC`sdS(s Takes an iterable of items and filters them so that only items which are contained within this specifier are allowed in it. N((R titerableR((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pytfilterFRN(R R tabctabstractmethodRRRRtabstractpropertyRtsettertNoneRR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR st_IndividualSpecifiercB`seZiZdddZdZdZdZdZdZ dZ dZ e d Z e d Ze d Zejd Zd ZddZddZRS(RcC`sj|jj|}|s0tdj|n|jdj|jdjf|_||_dS(NsInvalid specifier: '{0}'toperatortversion(t_regextsearchRtformattgrouptstript_spect _prereleases(R tspecRtmatch((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__init__Rs cC`sF|jdk r!dj|jnd}dj|jjt||S(Ns, prereleases={0!r}Rs<{0}({1!r}{2})>(R(RR$Rt __class__R tstr(R tpre((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__repr___s !  
cC`sdj|jS(Ns{0}{1}(R$R'(R ((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRlscC`s t|jS(N(thashR'(R ((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRoscC`s`t|tr:y|j|}WqPtk r6tSXnt||jsPtS|j|jkS(N(t isinstanceRR,RtNotImplementedR'(R R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRrs cC`s`t|tr:y|j|}WqPtk r6tSXnt||jsPtS|j|jkS(N(R1RR,RR2R'(R R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR}s cC`st|dj|j|S(Ns _compare_{0}(tgetattrR$t _operators(R top((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt _get_operatorscC`s(t|ttfs$t|}n|S(N(R1RRR(R R!((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_coerce_versionscC`s |jdS(Ni(R'(R ((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR scC`s |jdS(Ni(R'(R ((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR!scC`s|jS(N(R((R ((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRscC`s ||_dS(N(R((R R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRscC`s |j|S(N(R(R R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt __contains__scC`sW|dkr|j}n|j|}|jr;| r;tS|j|j||jS(N(RRR7t is_prereleasetFalseR6R R!(R RR((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRs   cc`st}g}i|dk r!|ntd6}xf|D]^}|j|}|j||r2|jr|pn|j r|j|qt}|Vq2q2W| r|rx|D] }|VqWndS(NR(R:RtTrueR7RR9Rtappend(R RRtyieldedtfound_prereleasestkwR!tparsed_version((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRs     N(R R R4RR+R/RRRRR6R7tpropertyR R!RRR8RR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRNs       tLegacySpecifiercB`seZdZejdedejejBZidd6dd6dd6d d 6d d 6d d6ZdZ dZ dZ dZ dZ dZdZRS(s (?P(==|!=|<=|>=|<|>)) \s* (?P [^,;\s)]* # Since this is a "legacy" specifier, and the version # string can be just about anything, we match everything # except for whitespace, a 
semi-colon for marker support, # a closing paren since versions can be enclosed in # them, and a comma since it's a version separator. ) s^\s*s\s*$tequals==t not_equals!=tless_than_equals<=tgreater_than_equals>=t less_thantcC`s(t|ts$tt|}n|S(N(R1RR-(R R!((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR7scC`s||j|kS(N(R7(R t prospectiveR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_equalscC`s||j|kS(N(R7(R RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_not_equalscC`s||j|kS(N(R7(R RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_less_than_equalscC`s||j|kS(N(R7(R RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_greater_than_equalscC`s||j|kS(N(R7(R RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_less_thanscC`s||j|kS(N(R7(R RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_greater_thans(R R t _regex_strtretcompiletVERBOSEt IGNORECASER"R4R7RLRMRNRORPRQ(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRBs"        c`s"tjfd}|S(Nc`s#t|tstS|||S(N(R1RR:(R RKR)(tfn(sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pytwrapped s(t functoolstwraps(RWRX((RWsD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_require_version_compare st SpecifiercB`seZdZejdedejejBZidd6dd6dd6d d 6d d 6d d6dd6dd6Ze dZ e dZ e dZ e dZ e dZe dZe dZdZedZejdZRS(s (?P(~=|==|!=|<=|>=|<|>|===)) (?P (?: # The identity operators allow for an escape hatch that will # do an exact string match of the version you wish to install. # This will not be parsed by PEP 440 and we cannot determine # any semantic meaning from it. This operator is discouraged # but included entirely as an escape hatch. 
(?<====) # Only match for the identity operator \s* [^\s]* # We just match everything, except for whitespace # since we are only testing for strict identity. ) | (?: # The (non)equality operators allow for wild card and local # versions to be specified so we have to define these two # operators separately to enable that. (?<===|!=) # Only match for equals and not equals \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? # You cannot use a wild card and a dev or local version # together so group them with a | and make them optional. (?: (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local | \.\* # Wild card syntax of .* )? ) | (?: # The compatible operator requires at least two digits in the # release segment. (?<=~=) # Only match for the compatible operator \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release ) | (?: # All other operators only allow a sub set of what the # (non)equality operators do. Specifically they do not allow # local versions to be specified nor do they allow the prefix # matching wild cards. 
(?=RGRHRIRJt arbitrarys===cC`sfdjttjdt|d }|d7}|jd||oe|jd||S(Nt.cS`s|jd o|jd S(Ntposttdev(t startswith(tx((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pytsis.*s>=s==(tjointlistt itertoolst takewhilet_version_splitR6(R RKR)tprefix((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_compatibles   cC`s|jdrht|j}t|d }tt|}|t| }t||\}}n't|}|jst|j}n||kS(Ns.*i(tendswithRtpublicRiR-tlent _pad_versiontlocal(R RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRLs  cC`s|j|| S(N(RL(R RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRMscC`s|t|kS(N(R(R RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRNscC`s|t|kS(N(R(R RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyROscC`sXt|}||kstS|j rT|jrTt|jt|jkrTtSntS(N(RR:R9t base_versionR;(R RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRPs  cC`st|}||kstS|j rT|jrTt|jt|jkrTtSn|jdk rt|jt|jkrtSntS(N(RR:tis_postreleaseRqRpRR;(R RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRQs  cC`s"t|jt|jkS(N(R-tlower(R RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_arbitraryscC`ss|jdk r|jS|j\}}|dkro|dkrY|jdrY|d }nt|jrotSntS( Ns==s>=s<=s~=s===s.*i(s==s>=s<=s~=s===(R(RR'RlRR9R;R:(R R R!((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRs  cC`s ||_dS(N(R((R R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRs(R R RRRSRTRURVR"R4R[RkRLRMRNRORPRQRtRARR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR\s,^ # s^([0-9]+)((?:a|b|c|rc)[0-9]+)$cC`s\g}xO|jdD]>}tj|}|rG|j|jq|j|qW|S(NR_(tsplitt _prefix_regexR#textendtgroupsR<(R!tresultRR*((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRi'sc C`sgg}}|jttjd||jttjd||j|t|d|j|t|d|jddgtdt|dt|d|jddgtdt|dt|dttj|ttj|fS(NcS`s 
|jS(N(tisdigit(Rc((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRd6RcS`s |jS(N(Rz(Rc((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRd7Riit0(R<RfRgRhRntinserttmaxtchain(tlefttrightt left_splitt right_split((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRo2s ""//t SpecifierSetcB`seZdddZdZdZdZdZdZdZ dZ d Z e d Z e jd Z d Zdd ZddZRS(RcC`sg|jdD]}|jr|j^q}t}xL|D]D}y|jt|WqDtk r|jt|qDXqDWt||_||_ dS(Nt,( RuR&tsettaddR\RRBt frozensett_specsR((R t specifiersRtstparsedt specifier((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR+Os4   cC`s=|jdk r!dj|jnd}djt||S(Ns, prereleases={0!r}Rs(R(RR$RR-(R R.((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR/ds!cC`s djtd|jDS(NRcs`s|]}t|VqdS(N(R-(t.0R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pys ns(RetsortedR(R ((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRmscC`s t|jS(N(R0R(R ((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRpscC`st|trt|}nt|ts1tSt}t|j|jB|_|jdkr|jdk r|j|_nZ|jdk r|jdkr|j|_n-|j|jkr|j|_n td|S(NsFCannot combine SpecifierSets with True and False prerelease overrides.( R1RRR2RRR(Rt ValueError(R RR((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__and__ss  cC`set|trt|}n7t|trBtt|}nt|tsUtS|j|jkS(N(R1RRRR-R2R(R R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRscC`set|trt|}n7t|trBtt|}nt|tsUtS|j|jkS(N(R1RRRR-R2R(R R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRscC`s t|jS(N(RnR(R ((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__len__scC`s t|jS(N(titerR(R ((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__iter__scC`s:|jdk r|jS|js#dStd|jDS(Ncs`s|]}|jVqdS(N(R(RR((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pys s(R(RRtany(R ((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRs  
cC`s ||_dS(N(R((R R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRscC`s |j|S(N(R(R R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR8sc`sptttfs$tndkr<|jn rPjrPtStfd|j DS(Nc3`s$|]}|jdVqdS(RN(R(RR(RR(sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pys s( R1RRRRRR9R:tallR(R RR((RRsD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRs  cC`s |dkr|j}n|jrTx,|jD]!}|j|dt|}q+W|Sg}g}x|D]{}t|ttfst|}n|}t|trqgn|j r| r|s|j |qqg|j |qgW| r|r|dkr|S|SdS(NR( RRRRtboolR1RRRR9R<(R RRR)tfilteredR>RR@((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRs*    N(R R RR+R/RRRRRRRRARRR8RR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRMs       (t __future__RRRRRYRgRSt_compatRRR!RRRRRtABCMetatobjectR RRBR[R\RTRvRiRoR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyts"    "94  PKe[_packaging/requirements.pyonu[ abc@`sYddlmZmZmZddlZddlZddlmZmZm Z m Z ddlm Z m Z m Z mZmZddlmZddlmZddlmZmZdd lmZmZmZd efd YZe ejejZ ed j!Z"ed j!Z#edj!Z$edj!Z%edj!Z&edj!Z'edj!Z(e dZ)e e e)e BZ*ee e e*Z+e+dZ,e+Z-eddZ.e(e.Z/e-e e&e-Z0e"e e0e#dZ1eej2ej3ej4BZ5eej2ej3ej4BZ6e5e6AZ7ee7e e&e7ddde8dZ9e e$e9e%e9BZ:e:j;de e:dZ<e<j;de edZej;de'Z=e=eZ>e<e e>Z?e/e e>Z@e,e e1e@e?BZAeeAeZBd eCfd!YZDdS("i(tabsolute_importtdivisiontprint_functionN(t stringStartt stringEndtoriginalTextFortParseException(t ZeroOrMoretWordtOptionaltRegextCombine(tLiteral(tparsei(t MARKER_EXPRtMarker(tLegacySpecifiert Specifiert SpecifierSettInvalidRequirementcB`seZdZRS(sJ An invalid requirement was found, users should refer to PEP 508. 
(t__name__t __module__t__doc__(((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyRst[t]t(t)t,t;t@s-_.tnames[^ ]+turltextrast joinStringtadjacentt _raw_speccC`s |jp dS(Nt(R#(tstltt((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyt8R$t specifiercC`s|dS(Ni((R%R&R'((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyR(;R$tmarkercC`st||j|j!S(N(Rt_original_startt _original_end(R%R&R'((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyR(?R$t RequirementcB`s)eZdZdZdZdZRS(sParse a requirement. Parse a given requirement string into its parts, such as name, specifier, URL, and extras. Raises InvalidRequirement on a badly-formed requirement string. cC`sytj|}Wn9tk rN}tdj||j|jd!nX|j|_|jrtj|j}|j o|j s|j r|j rtdn|j|_n d|_t |j r|j jng|_ t|j|_|jr|jnd|_dS(Ns+Invalid requirement, parse error at "{0!r}"isInvalid URL given(t REQUIREMENTt parseStringRRtformattlocRRturlparsetschemetnetloctNonetsetR tasListRR)R*(tselftrequirement_stringtreqtet parsed_url((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyt__init__Zs"!   
'cC`s|jg}|jr@|jdjdjt|jn|jrb|jt|jn|jr|jdj|jn|j r|jdj|j ndj|S(Ns[{0}]Rs@ {0}s; {0}R$( RR tappendR0tjointsortedR)tstrRR*(R8tparts((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyt__str__os  +   cC`sdjt|S(Ns(R0RA(R8((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyt__repr__s(RRRR=RCRD(((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyR-Ms  (Et __future__RRRtstringtretpip._vendor.pyparsingRRRRRRR R R R tLtpip._vendor.six.moves.urllibR R2tmarkersRRt specifiersRRRt ValueErrorRt ascii_letterstdigitstALPHANUMtsuppresstLBRACKETtRBRACKETtLPARENtRPARENtCOMMAt SEMICOLONtATt PUNCTUATIONtIDENTIFIER_ENDt IDENTIFIERtNAMEtEXTRAtURItURLt EXTRAS_LISTtEXTRASt _regex_strtVERBOSEt IGNORECASEtVERSION_PEP440tVERSION_LEGACYt VERSION_ONEtFalset VERSION_MANYt _VERSION_SPECtsetParseActiont VERSION_SPECtMARKER_SEPERATORtMARKERtVERSION_AND_MARKERtURL_AND_MARKERtNAMED_REQUIREMENTR.tobjectR-(((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pytsZ  "(      PKe[D;packaging/__about__.pyonu[ abc@`srddlmZmZmZdddddddd gZd Zd Zd Zd ZdZ dZ dZ de Z dS(i(tabsolute_importtdivisiontprint_functiont __title__t __summary__t__uri__t __version__t __author__t __email__t __license__t __copyright__t packagings"Core utilities for Python packagess!https://github.com/pypa/packagings16.8s)Donald Stufft and individual contributorssdonald@stufft.ios"BSD or Apache License, Version 2.0sCopyright 2014-2016 %sN( t __future__RRRt__all__RRRRRRR R (((sC/usr/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.pytsPKe[packaging/_compat.pycnu[ abc@`svddlmZmZmZddlZejddkZejddkZer`efZ n e fZ dZ dS(i(tabsolute_importtdivisiontprint_functionNiic`s5dffdY}tj|ddiS(s/ Create a base class with a metaclass. 
t metaclassc`seZfdZRS(c`s||S(N((tclstnamet this_basestd(tbasestmeta(sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.pyt__new__s(t__name__t __module__R ((RR (sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.pyRsttemporary_class((ttypeR (R RR((RR sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.pytwith_metaclasss( t __future__RRRtsyst version_infotPY2tPY3tstrt string_typest basestringR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.pyts   PKe[Z]2-2-packaging/markers.pyonu[ abc@`suddlmZmZmZddlZddlZddlZddlZddlm Z m Z m Z m Z ddlm Z mZmZmZddlmZddlmZddlmZmZd d d d d gZd efdYZd efdYZd efdYZdefdYZdefdYZdefdYZ defdYZ!ededBedBedBedBedBedBed Bed!Bed"Bed#Bed$Bed%Bed&Bed'Bed(Bed)Bed*BZ"id#d$6d"d%6dd&6dd'6dd(6dd)6Z#e"j$d+ed,ed-Bed.Bed/Bed0Bed1Bed2Bed3BZ%e%ed4Bed5BZ&e&j$d6ed7ed8BZ'e'j$d9ed:ed;BZ(e"e'BZ)ee)e&e)Z*e*j$d<ed=j+Z,ed>j+Z-eZ.e*ee,e.e-BZ/e.e/e e(e.>e e.e Z0d?Z1e2d@Z3idAd56dBd46ej4d36ej5d/6ej6d-6ej7d06ej8d.6ej9d26Z:dCZ;eZ<dDZ=dEZ>dFZ?dGZ@d efdHYZAdS(Ii(tabsolute_importtdivisiontprint_functionN(tParseExceptiont ParseResultst stringStartt stringEnd(t ZeroOrMoretGrouptForwardt QuotedString(tLiterali(t string_types(t SpecifiertInvalidSpecifiert InvalidMarkertUndefinedComparisontUndefinedEnvironmentNametMarkertdefault_environmentcB`seZdZRS(sE An invalid marker was found, users should refer to PEP 508. (t__name__t __module__t__doc__(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRscB`seZdZRS(sP An invalid operation was attempted on a value that doesn't support it. (RRR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR!scB`seZdZRS(s\ A name was attempted to be used that does not exist inside of the environment. 
(RRR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR'stNodecB`s,eZdZdZdZdZRS(cC`s ||_dS(N(tvalue(tselfR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyt__init__0scC`s t|jS(N(tstrR(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyt__str__3scC`sdj|jjt|S(Ns <{0}({1!r})>(tformatt __class__RR(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyt__repr__6scC`s tdS(N(tNotImplementedError(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyt serialize9s(RRRRRR!(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR.s   tVariablecB`seZdZRS(cC`s t|S(N(R(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR!?s(RRR!(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR"=stValuecB`seZdZRS(cC`s dj|S(Ns"{0}"(R(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR!Es(RRR!(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR#CstOpcB`seZdZRS(cC`s t|S(N(R(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR!Ks(RRR!(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR$Istimplementation_versiontplatform_python_implementationtimplementation_nametpython_full_versiontplatform_releasetplatform_versiontplatform_machinetplatform_systemtpython_versiont sys_platformtos_namesos.names sys.platformsplatform.versionsplatform.machinesplatform.python_implementationtpython_implementationtextracC`sttj|d|dS(Ni(R"tALIASEStget(tstltt((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pytkts===s==s>=s<=s!=s~=t>tst RARB(RCtlisttlenR@RHtjoinR!(tmarkerRGtinnerRK((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRHs! 
&cC`s ||kS(N((tlhstrhs((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR7R8cC`s ||kS(N((RRRS((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR7R8cC`sy%tdj|j|g}Wntk r8nX|j|Stj|j}|dkrtdj |||n|||S(NR8s#Undefined {0!r} on {1!r} and {2!r}.( R ROR!Rtcontainst _operatorsR3tNoneRR(RRtopRStspectoper((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyt_eval_ops%   cC`s:|j|t}|tkr6tdj|n|S(Ns/{0!r} does not exist in evaluation environment.(R3t _undefinedRR(t environmenttnameR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyt_get_envs  c C`sgg}x|D]}t|trB|djt||qt|tr|\}}}t|trt||j}|j}n|j}t||j}|djt|||q|dkr|jgqqWt d|DS(NiR?cs`s|]}t|VqdS(N(tall(RJtitem((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pys s( RCRMtappendt_evaluate_markersR@R"R^RRZtany( tmarkersR\tgroupsRPRRRWRSt lhs_valuet rhs_value((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRbs      cC`sFdj|}|j}|dkrB||dt|j7}n|S(Ns{0.major}.{0.minor}.{0.micro}tfinali(Rt releaselevelRtserial(tinfotversiontkind((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pytformat_full_versions   cC`sttdr0ttjj}tjj}n d}d}i |d6|d6tjd6tjd6tj d6tj d 6tjd 6tj d 6tj d 6tj d d6tjd6S(Ntimplementationt0R8R'R%R/R+R)R,R*R(R&iR-R.( thasattrtsysRnRoRlR]tostplatformtmachinetreleasetsystemR-R0(tiverR'((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRs"       cB`s/eZdZdZdZddZRS(cC`seyttj||_WnBtk r`}dj|||j|jd!}t|nXdS(Ns+Invalid marker: {0!r}, parse error at {1!r}i(RDtMARKERt parseStringt_markersRRtlocR(RRPteterr_str((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRs cC`s t|jS(N(RHR{(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRscC`sdjt|S(Ns(RR(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRscC`s5t}|dk r%|j|nt|j|S(s$Evaluate a marker. Return the boolean from evaluating the given marker against the environment. 
environment is an optional argument to override all or part of the determined environment. The environment is determined from the current Python process. N(RRVtupdateRbR{(RR\tcurrent_environment((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pytevaluate"s  N(RRRRRRVR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRs   (Bt __future__RRRtoperatorRsRtRrtpip._vendor.pyparsingRRRRRRR R R tLt_compatR t specifiersR Rt__all__t ValueErrorRRRtobjectRR"R#R$tVARIABLER2tsetParseActiont VERSION_CMPt MARKER_OPt MARKER_VALUEtBOOLOPt MARKER_VARt MARKER_ITEMtsuppresstLPARENtRPARENt MARKER_EXPRt MARKER_ATOMRyRDtTrueRHtlttleteqtnetgetgtRURZR[R^RbRnRR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyts|    ""     E                  PKe[_packaging/requirements.pycnu[ abc@`sYddlmZmZmZddlZddlZddlmZmZm Z m Z ddlm Z m Z m Z mZmZddlmZddlmZddlmZmZdd lmZmZmZd efd YZe ejejZ ed j!Z"ed j!Z#edj!Z$edj!Z%edj!Z&edj!Z'edj!Z(e dZ)e e e)e BZ*ee e e*Z+e+dZ,e+Z-eddZ.e(e.Z/e-e e&e-Z0e"e e0e#dZ1eej2ej3ej4BZ5eej2ej3ej4BZ6e5e6AZ7ee7e e&e7ddde8dZ9e e$e9e%e9BZ:e:j;de e:dZ<e<j;de edZej;de'Z=e=eZ>e<e e>Z?e/e e>Z@e,e e1e@e?BZAeeAeZBd eCfd!YZDdS("i(tabsolute_importtdivisiontprint_functionN(t stringStartt stringEndtoriginalTextFortParseException(t ZeroOrMoretWordtOptionaltRegextCombine(tLiteral(tparsei(t MARKER_EXPRtMarker(tLegacySpecifiert Specifiert SpecifierSettInvalidRequirementcB`seZdZRS(sJ An invalid requirement was found, users should refer to PEP 508. 
(t__name__t __module__t__doc__(((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyRst[t]t(t)t,t;t@s-_.tnames[^ ]+turltextrast joinStringtadjacentt _raw_speccC`s |jp dS(Nt(R#(tstltt((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyt8R$t specifiercC`s|dS(Ni((R%R&R'((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyR(;R$tmarkercC`st||j|j!S(N(Rt_original_startt _original_end(R%R&R'((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyR(?R$t RequirementcB`s)eZdZdZdZdZRS(sParse a requirement. Parse a given requirement string into its parts, such as name, specifier, URL, and extras. Raises InvalidRequirement on a badly-formed requirement string. cC`sytj|}Wn9tk rN}tdj||j|jd!nX|j|_|jrtj|j}|j o|j s|j r|j rtdn|j|_n d|_t |j r|j jng|_ t|j|_|jr|jnd|_dS(Ns+Invalid requirement, parse error at "{0!r}"isInvalid URL given(t REQUIREMENTt parseStringRRtformattlocRRturlparsetschemetnetloctNonetsetR tasListRR)R*(tselftrequirement_stringtreqtet parsed_url((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyt__init__Zs"!   
'cC`s|jg}|jr@|jdjdjt|jn|jrb|jt|jn|jr|jdj|jn|j r|jdj|j ndj|S(Ns[{0}]Rs@ {0}s; {0}R$( RR tappendR0tjointsortedR)tstrRR*(R8tparts((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyt__str__os  +   cC`sdjt|S(Ns(R0RA(R8((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyt__repr__s(RRRR=RCRD(((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyR-Ms  (Et __future__RRRtstringtretpip._vendor.pyparsingRRRRRRR R R R tLtpip._vendor.six.moves.urllibR R2tmarkersRRt specifiersRRRt ValueErrorRt ascii_letterstdigitstALPHANUMtsuppresstLBRACKETtRBRACKETtLPARENtRPARENtCOMMAt SEMICOLONtATt PUNCTUATIONtIDENTIFIER_ENDt IDENTIFIERtNAMEtEXTRAtURItURLt EXTRAS_LISTtEXTRASt _regex_strtVERBOSEt IGNORECASEtVERSION_PEP440tVERSION_LEGACYt VERSION_ONEtFalset VERSION_MANYt _VERSION_SPECtsetParseActiont VERSION_SPECtMARKER_SEPERATORtMARKERtVERSION_AND_MARKERtURL_AND_MARKERtNAMED_REQUIREMENTR.tobjectR-(((sF/usr/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pytsZ  "(      PKe[SE77packaging/version.pyonu[ abc@`snddlmZmZmZddlZddlZddlZddlmZddddd gZ ej d d d d dddgZ dZ de fdYZdefdYZdefdYZejdejZidd6dd6dd6dd6dd 6ZdZdZdZdefd YZd!Zejd"Zd#Zd$ZdS(%i(tabsolute_importtdivisiontprint_functionNi(tInfinitytparsetVersiont LegacyVersiontInvalidVersiontVERSION_PATTERNt_VersiontepochtreleasetdevtpretposttlocalcC`s-yt|SWntk r(t|SXdS(s Parse the given version string and return either a :class:`Version` object or a :class:`LegacyVersion` object depending on if the given version is a valid PEP 440 version or a legacy version. N(RRR(tversion((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyRs cB`seZdZRS(sF An invalid version was found, users should refer to PEP 440. 
(t__name__t __module__t__doc__(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR$st _BaseVersioncB`sPeZdZdZdZdZdZdZdZdZ RS(cC`s t|jS(N(thasht_key(tself((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__hash__,scC`s|j|dS(NcS`s ||kS(N((tsto((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt0t(t_compare(Rtother((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__lt__/scC`s|j|dS(NcS`s ||kS(N((RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR3R(R(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__le__2scC`s|j|dS(NcS`s ||kS(N((RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR6R(R(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__eq__5scC`s|j|dS(NcS`s ||kS(N((RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR9R(R(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__ge__8scC`s|j|dS(NcS`s ||kS(N((RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR<R(R(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__gt__;scC`s|j|dS(NcS`s ||kS(N((RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR?R(R(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__ne__>scC`s&t|tstS||j|jS(N(t isinstanceRtNotImplementedR(RRtmethod((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyRAs( RRRRR R!R"R#R$R(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR*s       cB`sneZdZdZdZedZedZedZedZ edZ 
RS(cC`s%t||_t|j|_dS(N(tstrt_versiont_legacy_cmpkeyR(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__init__JscC`s|jS(N(R)(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__str__NscC`sdjtt|S(Ns(tformattreprR((R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__repr__QscC`s|jS(N(R)(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pytpublicTscC`s|jS(N(R)(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt base_versionXscC`sdS(N(tNone(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR\scC`stS(N(tFalse(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt is_prerelease`scC`stS(N(R3(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pytis_postreleaseds( RRR+R,R/tpropertyR0R1RR4R5(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyRHs   s(\d+ | [a-z]+ | \.| -)tctpreviewsfinal-t-trct@cc`sxxltj|D][}tj||}| s|dkrAqn|d dkrb|jdVqd|VqWdVdS(Nt.it 0123456789it*s*final(t_legacy_version_component_retsplitt_legacy_version_replacement_maptgettzfill(Rtpart((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt_parse_version_partsrs cC`sd}g}xt|jD]}|jdr|dkrjx'|rf|ddkrf|jqCWnx'|r|ddkr|jqmWn|j|qWt|}||fS(NiR>s*finals*final-t00000000(REtlowert startswithtpoptappendttuple(RR tpartsRD((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR*s  s v? (?: (?:(?P[0-9]+)!)? # epoch (?P[0-9]+(?:\.[0-9]+)*) # release segment (?P
                                          # pre-release
            [-_\.]?
            (?P(a|b|c|rc|alpha|beta|pre|preview))
            [-_\.]?
            (?P[0-9]+)?
        )?
        (?P                                         # post release
            (?:-(?P[0-9]+))
            |
            (?:
                [-_\.]?
                (?Ppost|rev|r)
                [-_\.]?
                (?P[0-9]+)?
            )
        )?
        (?P                                          # dev release
            [-_\.]?
            (?Pdev)
            [-_\.]?
            (?P[0-9]+)?
        )?
    )
    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
cB`seZejdedejejBZdZdZ	dZ
edZedZ
edZedZed	ZRS(
s^\s*s\s*$cC`s[|jj|}|s0tdj|ntd|jdrZt|jdnddtd|jdjdDdt	|jd|jd	d
t	|jd|jdp|jd
dt	|jd|jddt
|jd|_t|jj
|jj|jj|jj|jj|jj|_dS(NsInvalid version: '{0}'R
iRcs`s|]}t|VqdS(N(tint(t.0ti((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	sR<R
tpre_ltpre_nRtpost_ltpost_n1tpost_n2Rtdev_ltdev_nR(t_regextsearchRR-R	tgroupRMRKR@t_parse_letter_versiont_parse_local_versionR)t_cmpkeyR
RR
RRRR(RRtmatch((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR+s.*(!					cC`sdjtt|S(Ns(R-R.R((R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR/scC`sSg}|jjdkr7|jdj|jjn|jdjd|jjD|jjdk	r|jdjd|jjDn|jjdk	r|jdj|jjdn|jj	dk	r|jd	j|jj	dn|jj
dk	rF|jd
jdjd|jj
Dndj|S(Nis{0}!R<cs`s|]}t|VqdS(N(R((RNtx((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	sRcs`s|]}t|VqdS(N(R((RNR^((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	ss.post{0}is.dev{0}s+{0}cs`s|]}t|VqdS(N(R((RNR^((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	s(R)R
RJR-tjoinRR
R2RRR(RRL((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR,s&)##,cC`st|jdddS(Nt+ii(R(R@(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR0
scC`sjg}|jjdkr7|jdj|jjn|jdjd|jjDdj|S(Nis{0}!R<cs`s|]}t|VqdS(N(R((RNR^((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	sR(R)R
RJR-R_R(RRL((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR1s
&cC`s0t|}d|kr,|jdddSdS(NR`i(R(R@(Rtversion_string((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyRscC`st|jjp|jjS(N(tboolR)RR
(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR4!scC`st|jjS(N(RbR)R(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR5%s(RRtretcompileRtVERBOSEt
IGNORECASERWR+R/R,R6R0R1RR4R5(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyRs	#		
cC`s|r|dkrd}n|j}|dkr<d}n?|dkrQd}n*|d
krfd	}n|dkr{d}n|t|fS|r|rd}|t|fSdS(NitalphatatbetatbR7R
R8R:trevtrR(R7R
R8(RkRl(R2RGRM(tlettertnumber((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyRZ*s 					
s[\._-]cC`s-|dk	r)tdtj|DSdS(sR
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    cs`s3|])}|js!|jn	t|VqdS(N(tisdigitRGRM(RNRD((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	RsN(R2RKt_local_version_seperatorsR@(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR[LscC`sttttjdt|}|dkr[|dkr[|dk	r[t}n|dkrpt}n|dkrt}n|dkrt}n|dkrt}ntd|D}||||||fS(NcS`s
|dkS(Ni((R^((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR`Rcs`s7|]-}t|tr$|dfn
t|fVqdS(RN(R%RMR(RNRO((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	s(RKtreversedtlistt	itertoolst	dropwhileR2R(R
RR
RRR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR\Ws&	$
	
	
	
(t
__future__RRRtcollectionsRsRct_structuresRt__all__t
namedtupleR	Rt
ValueErrorRtobjectRRRdReR?RARER*RRRZRpR[R\(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyts0	!&		9k		PKe[b\\packaging/__init__.pycnu[
abc@`sxddlmZmZmZddlmZmZmZmZm	Z	m
Z
mZmZdddddd	d
dgZ
dS(
i(tabsolute_importtdivisiontprint_functioni(t
__author__t
__copyright__t	__email__t__license__t__summary__t	__title__t__uri__t__version__RRR	R
RRRRN(t
__future__RRRt	__about__RRRRRRR	R
t__all__(((sB/usr/lib/python2.7/site-packages/pip/_vendor/packaging/__init__.pyts:PKe[tnK--packaging/markers.pycnu[
abc@`suddlmZmZmZddlZddlZddlZddlZddlm	Z	m
Z
mZmZddlm
Z
mZmZmZddlmZddlmZddlmZmZd	d
ddd
gZd	efdYZd
efdYZdefdYZdefdYZdefdYZdefdYZ defdYZ!ededBedBedBedBedBedBed Bed!Bed"Bed#Bed$Bed%Bed&Bed'Bed(Bed)Bed*BZ"id#d$6d"d%6dd&6dd'6dd(6dd)6Z#e"j$d+ed,ed-Bed.Bed/Bed0Bed1Bed2Bed3BZ%e%ed4Bed5BZ&e&j$d6ed7ed8BZ'e'j$d9ed:ed;BZ(e"e'BZ)ee)e&e)Z*e*j$d<ed=j+Z,ed>j+Z-eZ.e*ee,e.e-BZ/e.e/e
e(e.>ee.eZ0d?Z1e2d@Z3idAd56dBd46ej4d36ej5d/6ej6d-6ej7d06ej8d.6ej9d26Z:dCZ;eZ<dDZ=dEZ>dFZ?dGZ@defdHYZAdS(Ii(tabsolute_importtdivisiontprint_functionN(tParseExceptiontParseResultststringStartt	stringEnd(t
ZeroOrMoretGrouptForwardtQuotedString(tLiterali(tstring_types(t	SpecifiertInvalidSpecifiert
InvalidMarkertUndefinedComparisontUndefinedEnvironmentNametMarkertdefault_environmentcB`seZdZRS(sE
    An invalid marker was found, users should refer to PEP 508.
    (t__name__t
__module__t__doc__(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRscB`seZdZRS(sP
    An invalid operation was attempted on a value that doesn't support it.
    (RRR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR!scB`seZdZRS(s\
    A name was attempted to be used that does not exist inside of the
    environment.
    (RRR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR'stNodecB`s,eZdZdZdZdZRS(cC`s
||_dS(N(tvalue(tselfR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyt__init__0scC`s
t|jS(N(tstrR(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyt__str__3scC`sdj|jjt|S(Ns<{0}({1!r})>(tformatt	__class__RR(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyt__repr__6scC`s
tdS(N(tNotImplementedError(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyt	serialize9s(RRRRRR!(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR.s			tVariablecB`seZdZRS(cC`s
t|S(N(R(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR!?s(RRR!(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR"=stValuecB`seZdZRS(cC`s
dj|S(Ns"{0}"(R(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR!Es(RRR!(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR#CstOpcB`seZdZRS(cC`s
t|S(N(R(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR!Ks(RRR!(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR$Istimplementation_versiontplatform_python_implementationtimplementation_nametpython_full_versiontplatform_releasetplatform_versiontplatform_machinetplatform_systemtpython_versiontsys_platformtos_namesos.namessys.platformsplatform.versionsplatform.machinesplatform.python_implementationtpython_implementationtextracC`sttj|d|dS(Ni(R"tALIASEStget(tstltt((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pytkts===s==s>=s<=s!=s~=t>tst RARB(	RCtlistR@RtAssertionErrortlenRHtjoinR!(tmarkerRGtinnerRK((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRHs!
&cC`s
||kS(N((tlhstrhs((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR7R8cC`s
||kS(N((RSRT((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyR7R8cC`sy%tdj|j|g}Wntk
r8nX|j|Stj|j}|dkrtdj	|||n|||S(NR8s#Undefined {0!r} on {1!r} and {2!r}.(
R
RPR!Rtcontainst
_operatorsR3tNoneRR(RStopRTtspectoper((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyt_eval_ops%

cC`s:|j|t}|tkr6tdj|n|S(Ns/{0!r} does not exist in evaluation environment.(R3t
_undefinedRR(tenvironmenttnameR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyt_get_envs
c	C`s,gg}x|D]}t|tttfs4tt|tr`|djt||qt|tr|\}}}t|trt||j	}|j	}n|j	}t||j	}|djt
|||q|dkst|dkr|jgqqWtd|DS(NiR>R?cs`s|]}t|VqdS(N(tall(RJtitem((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pys	s(R>R?(RCRMR@RRNtappendt_evaluate_markersR"R_RR[tany(	tmarkersR]tgroupsRQRSRXRTt	lhs_valuet	rhs_value((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRcs"	
	 cC`sFdj|}|j}|dkrB||dt|j7}n|S(Ns{0.major}.{0.minor}.{0.micro}tfinali(RtreleaselevelRtserial(tinfotversiontkind((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pytformat_full_versions
	cC`sttdr0ttjj}tjj}nd}d}i|d6|d6tjd6tjd6tj	d6tj
d	6tjd
6tjd6tjd6tjd
 d6tjd6S(Ntimplementationt0R8R'R%R/R+R)R,R*R(R&iR-R.(
thasattrtsysRoRpRmR^tostplatformtmachinetreleasetsystemR-R0(tiverR'((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRs"






cB`s/eZdZdZdZddZRS(cC`seyttj||_WnBtk
r`}dj|||j|jd!}t|nXdS(Ns+Invalid marker: {0!r}, parse error at {1!r}i(RDtMARKERtparseStringt_markersRRtlocR(RRQteterr_str((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRscC`s
t|jS(N(RHR|(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRscC`sdjt|S(Ns(RR(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRscC`s5t}|dk	r%|j|nt|j|S(s$Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        N(RRWtupdateRcR|(RR]tcurrent_environment((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pytevaluate"s		N(RRRRRRWR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyRs			(Bt
__future__RRRtoperatorRtRuRstpip._vendor.pyparsingRRRRRRR	R
RtLt_compatRt
specifiersR
Rt__all__t
ValueErrorRRRtobjectRR"R#R$tVARIABLER2tsetParseActiontVERSION_CMPt	MARKER_OPtMARKER_VALUEtBOOLOPt
MARKER_VARtMARKER_ITEMtsuppresstLPARENtRPARENtMARKER_EXPRtMARKER_ATOMRzRDtTrueRHtlttleteqtnetgetgtRVR[R\R_RcRoRR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyts|""	

	E

		







						PKe[pDDpackaging/utils.pycnu[
abc@`sDddlmZmZmZddlZejdZdZdS(i(tabsolute_importtdivisiontprint_functionNs[-_.]+cC`stjd|jS(Nt-(t_canonicalize_regextsubtlower(tname((s?/usr/lib/python2.7/site-packages/pip/_vendor/packaging/utils.pytcanonicalize_names(t
__future__RRRtretcompileRR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/packaging/utils.pytsPKe[packaging/_structures.pycnu[
abc@`s^ddlmZmZmZdefdYZeZdefdYZeZdS(i(tabsolute_importtdivisiontprint_functiontInfinitycB`sYeZdZdZdZdZdZdZdZdZ	dZ
RS(	cC`sdS(NR((tself((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__repr__	scC`stt|S(N(thashtrepr(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__hash__scC`stS(N(tFalse(Rtother((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__lt__scC`stS(N(R	(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__le__scC`st||jS(N(t
isinstancet	__class__(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__eq__scC`st||jS(N(R
R(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__ne__scC`stS(N(tTrue(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__gt__scC`stS(N(R(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__ge__scC`stS(N(tNegativeInfinity(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__neg__!s(t__name__t
__module__RRRRRRRRR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyRs								RcB`sYeZdZdZdZdZdZdZdZdZ	dZ
RS(	cC`sdS(Ns	-Infinity((R((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR)scC`stt|S(N(RR(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR,scC`stS(N(R(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR/scC`stS(N(R(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR2scC`st||jS(N(R
R(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR5scC`st||jS(N(R
R(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR8scC`stS(N(R	(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR;scC`stS(N(R	(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR>scC`stS(N(R(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyRAs(RRRRRRRRRRR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR's								N(t
__future__RRRtobjectRR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyts	PKe[BvD&``packaging/specifiers.pyonu[
abc@`s<ddlmZmZmZddlZddlZddlZddlZddlm	Z	m
Z
ddlmZm
Z
mZdefdYZde
ejefd	YZd
efdYZdefd
YZdZdefdYZejdZdZdZdefdYZdS(i(tabsolute_importtdivisiontprint_functionNi(tstring_typestwith_metaclass(tVersiont
LegacyVersiontparsetInvalidSpecifiercB`seZdZRS(sH
    An invalid specifier was found, users should refer to PEP 440.
    (t__name__t
__module__t__doc__(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRst
BaseSpecifiercB`seZejdZejdZejdZejdZejdZ	e	j
dZ	ejddZejddZ
RS(	cC`sdS(s
        Returns the str representation of this Specifier like object. This
        should be representative of the Specifier itself.
        N((tself((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__str__tcC`sdS(sF
        Returns a hash value for this Specifier like object.
        N((R
((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__hash__RcC`sdS(sq
        Returns a boolean representing whether or not the two Specifier like
        objects are equal.
        N((R
tother((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__eq__$RcC`sdS(su
        Returns a boolean representing whether or not the two Specifier like
        objects are not equal.
        N((R
R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__ne__+RcC`sdS(sg
        Returns whether or not pre-releases as a whole are allowed by this
        specifier.
        N((R
((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pytprereleases2RcC`sdS(sd
        Sets whether or not pre-releases as a whole are allowed by this
        specifier.
        N((R
tvalue((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR9RcC`sdS(sR
        Determines if the given item is contained within this specifier.
        N((R
titemR((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pytcontains@RcC`sdS(s
        Takes an iterable of items and filters them so that only items which
        are contained within this specifier are allowed in it.
        N((R
titerableR((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pytfilterFRN(R	R
tabctabstractmethodRRRRtabstractpropertyRtsettertNoneRR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRst_IndividualSpecifiercB`seZiZdddZdZdZdZdZdZ	dZ
dZed	Z
ed
ZedZejdZd
ZddZddZRS(RcC`sj|jj|}|s0tdj|n|jdj|jdjf|_||_dS(NsInvalid specifier: '{0}'toperatortversion(t_regextsearchRtformattgrouptstript_spect_prereleases(R
tspecRtmatch((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__init__RscC`sF|jdk	r!dj|jnd}dj|jjt||S(Ns, prereleases={0!r}Rs<{0}({1!r}{2})>(R(RR$Rt	__class__R	tstr(R
tpre((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__repr___s!		cC`sdj|jS(Ns{0}{1}(R$R'(R
((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRlscC`s
t|jS(N(thashR'(R
((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRoscC`s`t|tr:y|j|}WqPtk
r6tSXnt||jsPtS|j|jkS(N(t
isinstanceRR,RtNotImplementedR'(R
R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRrs
cC`s`t|tr:y|j|}WqPtk
r6tSXnt||jsPtS|j|jkS(N(R1RR,RR2R'(R
R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR}s
cC`st|dj|j|S(Ns_compare_{0}(tgetattrR$t
_operators(R
top((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt
_get_operatorscC`s(t|ttfs$t|}n|S(N(R1RRR(R
R!((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_coerce_versionscC`s|jdS(Ni(R'(R
((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR scC`s|jdS(Ni(R'(R
((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR!scC`s|jS(N(R((R
((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRscC`s
||_dS(N(R((R
R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRscC`s
|j|S(N(R(R
R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__contains__scC`sW|dkr|j}n|j|}|jr;|r;tS|j|j||jS(N(RRR7t
is_prereleasetFalseR6R R!(R
RR((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRscc`st}g}i|dk	r!|ntd6}xf|D]^}|j|}|j||r2|jr|pn|jr|j|qt}|Vq2q2W|r|rx|D]}|VqWndS(NR(R:RtTrueR7RR9Rtappend(R
RRtyieldedtfound_prereleasestkwR!tparsed_version((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRs
	

N(R	R
R4RR+R/RRRRR6R7tpropertyR R!RRR8RR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRNs 
	
							tLegacySpecifiercB`seZdZejdedejejBZidd6dd6dd6d	d
6dd6d
d6ZdZ	dZ
dZdZdZ
dZdZRS(s
        (?P(==|!=|<=|>=|<|>))
        \s*
        (?P
            [^,;\s)]* # Since this is a "legacy" specifier, and the version
                      # string can be just about anything, we match everything
                      # except for whitespace, a semi-colon for marker support,
                      # a closing paren since versions can be enclosed in
                      # them, and a comma since it's a version separator.
        )
        s^\s*s\s*$tequals==t	not_equals!=tless_than_equals<=tgreater_than_equals>=t	less_thantcC`s(t|ts$tt|}n|S(N(R1RR-(R
R!((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR7scC`s||j|kS(N(R7(R
tprospectiveR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_equalscC`s||j|kS(N(R7(R
RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_not_equalscC`s||j|kS(N(R7(R
RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_less_than_equalscC`s||j|kS(N(R7(R
RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_greater_than_equalscC`s||j|kS(N(R7(R
RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_less_thanscC`s||j|kS(N(R7(R
RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_greater_thans(R	R
t
_regex_strtretcompiletVERBOSEt
IGNORECASER"R4R7RLRMRNRORPRQ(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRBs"

						c`s"tjfd}|S(Nc`s#t|tstS|||S(N(R1RR:(R
RKR)(tfn(sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pytwrappeds(t	functoolstwraps(RWRX((RWsD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_require_version_compare
st	SpecifiercB`seZdZejdedejejBZidd6dd6dd6d	d
6dd6d
d6dd6dd6Ze	dZ
e	dZe	dZe	dZ
e	dZe	dZe	dZdZedZejdZRS(s
        (?P(~=|==|!=|<=|>=|<|>|===))
        (?P
            (?:
                # The identity operators allow for an escape hatch that will
                # do an exact string match of the version you wish to install.
                # This will not be parsed by PEP 440 and we cannot determine
                # any semantic meaning from it. This operator is discouraged
                # but included entirely as an escape hatch.
                (?<====)  # Only match for the identity operator
                \s*
                [^\s]*    # We just match everything, except for whitespace
                          # since we are only testing for strict identity.
            )
            |
            (?:
                # The (non)equality operators allow for wild card and local
                # versions to be specified so we have to define these two
                # operators separately to enable that.
                (?<===|!=)            # Only match for equals and not equals

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?

                # You cannot use a wild card and a dev or local version
                # together so group them with a | and make them optional.
                (?:
                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
                    |
                    \.\*  # Wild card syntax of .*
                )?
            )
            |
            (?:
                # The compatible operator requires at least two digits in the
                # release segment.
                (?<=~=)               # Only match for the compatible operator

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
            |
            (?:
                # All other operators only allow a sub set of what the
                # (non)equality operators do. Specifically they do not allow
                # local versions to be specified nor do they allow the prefix
                # matching wild cards.
                (?=RGRHRIRJt	arbitrarys===cC`sfdjttjdt|d }|d7}|jd||oe|jd||S(Nt.cS`s|jdo|jdS(Ntposttdev(t
startswith(tx((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pytsis.*s>=s==(tjointlistt	itertoolst	takewhilet_version_splitR6(R
RKR)tprefix((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_compatibles

cC`s|jdrht|j}t|d }tt|}|t| }t||\}}n't|}|jst|j}n||kS(Ns.*i(tendswithRtpublicRiR-tlent_pad_versiontlocal(R
RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRLs	cC`s|j||S(N(RL(R
RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRMscC`s|t|kS(N(R(R
RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRNscC`s|t|kS(N(R(R
RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyROscC`sXt|}||kstS|jrT|jrTt|jt|jkrTtSntS(N(RR:R9tbase_versionR;(R
RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRPscC`st|}||kstS|jrT|jrTt|jt|jkrTtSn|jdk	rt|jt|jkrtSntS(N(RR:tis_postreleaseRqRpRR;(R
RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRQscC`s"t|jt|jkS(N(R-tlower(R
RKR)((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt_compare_arbitraryscC`ss|jdk	r|jS|j\}}|dkro|dkrY|jdrY|d }nt|jrotSntS(	Ns==s>=s<=s~=s===s.*i(s==s>=s<=s~=s===(R(RR'RlRR9R;R:(R
R R!((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRs
cC`s
||_dS(N(R((R
R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRs(R	R
RRRSRTRURVR"R4R[RkRLRMRNRORPRQRtRARR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR\s,^
#	s^([0-9]+)((?:a|b|c|rc)[0-9]+)$cC`s\g}xO|jdD]>}tj|}|rG|j|jq|j|qW|S(NR_(tsplitt
_prefix_regexR#textendtgroupsR<(R!tresultRR*((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRi'sc	C`sgg}}|jttjd||jttjd||j|t|d|j|t|d|jddgtdt|dt|d|jddgtdt|dt|dttj|ttj|fS(NcS`s
|jS(N(tisdigit(Rc((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRd6RcS`s
|jS(N(Rz(Rc((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRd7Riit0(R<RfRgRhRntinserttmaxtchain(tlefttrightt
left_splittright_split((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRo2s
""//tSpecifierSetcB`seZdddZdZdZdZdZdZdZ	dZ
d	Zed
Z
e
jdZ
dZdd
ZddZRS(RcC`sg|jdD]}|jr|j^q}t}xL|D]D}y|jt|WqDtk
r|jt|qDXqDWt||_||_	dS(Nt,(
RuR&tsettaddR\RRBt	frozensett_specsR((R
t
specifiersRtstparsedt	specifier((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR+Os4	

cC`s=|jdk	r!dj|jnd}djt||S(Ns, prereleases={0!r}Rs(R(RR$RR-(R
R.((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR/ds!cC`s djtd|jDS(NRcs`s|]}t|VqdS(N(R-(t.0R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pys	ns(RetsortedR(R
((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRmscC`s
t|jS(N(R0R(R
((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRpscC`st|trt|}nt|ts1tSt}t|j|jB|_|jdkr|jdk	r|j|_nZ|jdk	r|jdkr|j|_n-|j|jkr|j|_ntd|S(NsFCannot combine SpecifierSets with True and False prerelease overrides.(	R1RRR2RRR(Rt
ValueError(R
RR((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__and__ss		cC`set|trt|}n7t|trBtt|}nt|tsUtS|j|jkS(N(R1RRRR-R2R(R
R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRscC`set|trt|}n7t|trBtt|}nt|tsUtS|j|jkS(N(R1RRRR-R2R(R
R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRscC`s
t|jS(N(RnR(R
((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__len__scC`s
t|jS(N(titerR(R
((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyt__iter__scC`s:|jdk	r|jS|js#dStd|jDS(Ncs`s|]}|jVqdS(N(R(RR((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pys	s(R(RRtany(R
((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRs
	cC`s
||_dS(N(R((R
R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRscC`s
|j|S(N(R(R
R((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyR8sc`sptttfs$tndkr<|jnrPjrPtStfd|j	DS(Nc3`s$|]}|jdVqdS(RN(R(RR(RR(sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pys	s(
R1RRRRRR9R:tallR(R
RR((RRsD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRscC`s|dkr|j}n|jrTx,|jD]!}|j|dt|}q+W|Sg}g}x|D]{}t|ttfst|}n|}t|trqgn|j	r|r|s|j
|qqg|j
|qgW|r|r|dkr|S|SdS(NR(RRRRtboolR1RRRR9R<(R
RRR)tfilteredR>RR@((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRs*	
N(R	R
RR+R/RRRRRRRRARRR8RR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyRMs						
	
			(t
__future__RRRRRYRgRSt_compatRRR!RRRRRtABCMetatobjectRRRBR[R\RTRvRiRoR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyts""94				PKe[D;packaging/__about__.pycnu[
abc@`srddlmZmZmZdddddddd	gZd
ZdZdZd
ZdZ	dZ
dZde	ZdS(i(tabsolute_importtdivisiontprint_functiont	__title__t__summary__t__uri__t__version__t
__author__t	__email__t__license__t
__copyright__t	packagings"Core utilities for Python packagess!https://github.com/pypa/packagings16.8s)Donald Stufft and individual contributorssdonald@stufft.ios"BSD or Apache License, Version 2.0sCopyright 2014-2016 %sN(
t
__future__RRRt__all__RRRRRRR	R
(((sC/usr/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.pytsPKe[packaging/_structures.pyonu[
abc@`s^ddlmZmZmZdefdYZeZdefdYZeZdS(i(tabsolute_importtdivisiontprint_functiontInfinitycB`sYeZdZdZdZdZdZdZdZdZ	dZ
RS(	cC`sdS(NR((tself((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__repr__	scC`stt|S(N(thashtrepr(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__hash__scC`stS(N(tFalse(Rtother((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__lt__scC`stS(N(R	(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__le__scC`st||jS(N(t
isinstancet	__class__(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__eq__scC`st||jS(N(R
R(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__ne__scC`stS(N(tTrue(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__gt__scC`stS(N(R(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__ge__scC`stS(N(tNegativeInfinity(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyt__neg__!s(t__name__t
__module__RRRRRRRRR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyRs								RcB`sYeZdZdZdZdZdZdZdZdZ	dZ
RS(	cC`sdS(Ns	-Infinity((R((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR)scC`stt|S(N(RR(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR,scC`stS(N(R(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR/scC`stS(N(R(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR2scC`st||jS(N(R
R(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR5scC`st||jS(N(R
R(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR8scC`stS(N(R	(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR;scC`stS(N(R	(RR
((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR>scC`stS(N(R(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyRAs(RRRRRRRRRRR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyR's								N(t
__future__RRRtobjectRR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyts	PKe[b\\packaging/__init__.pyonu[
abc@`sxddlmZmZmZddlmZmZmZmZm	Z	m
Z
mZmZdddddd	d
dgZ
dS(
i(tabsolute_importtdivisiontprint_functioni(t
__author__t
__copyright__t	__email__t__license__t__summary__t	__title__t__uri__t__version__RRR	R
RRRRN(t
__future__RRRt	__about__RRRRRRR	R
t__all__(((sB/usr/lib/python2.7/site-packages/pip/_vendor/packaging/__init__.pyts:PKe[SE77packaging/version.pycnu[
abc@`snddlmZmZmZddlZddlZddlZddlmZddddd	gZ	ej
d
ddd
dddgZdZde
fdYZdefdYZdefdYZejdejZidd6dd6dd6dd6dd
6ZdZdZdZdefd YZd!Zejd"Zd#Zd$ZdS(%i(tabsolute_importtdivisiontprint_functionNi(tInfinitytparsetVersiont
LegacyVersiontInvalidVersiontVERSION_PATTERNt_VersiontepochtreleasetdevtpretposttlocalcC`s-yt|SWntk
r(t|SXdS(s
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on if the given version is
    a valid PEP 440 version or a legacy version.
    N(RRR(tversion((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyRs
cB`seZdZRS(sF
    An invalid version was found, users should refer to PEP 440.
    (t__name__t
__module__t__doc__(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR$st_BaseVersioncB`sPeZdZdZdZdZdZdZdZdZ	RS(cC`s
t|jS(N(thasht_key(tself((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__hash__,scC`s|j|dS(NcS`s
||kS(N((tsto((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt0t(t_compare(Rtother((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__lt__/scC`s|j|dS(NcS`s
||kS(N((RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR3R(R(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__le__2scC`s|j|dS(NcS`s
||kS(N((RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR6R(R(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__eq__5scC`s|j|dS(NcS`s
||kS(N((RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR9R(R(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__ge__8scC`s|j|dS(NcS`s
||kS(N((RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR<R(R(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__gt__;scC`s|j|dS(NcS`s
||kS(N((RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR?R(R(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__ne__>scC`s&t|tstS||j|jS(N(t
isinstanceRtNotImplementedR(RRtmethod((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyRAs(
RRRRR R!R"R#R$R(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR*s							cB`sneZdZdZdZedZedZedZedZ	edZ
RS(cC`s%t||_t|j|_dS(N(tstrt_versiont_legacy_cmpkeyR(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__init__JscC`s|jS(N(R)(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__str__NscC`sdjtt|S(Ns(tformattreprR((R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt__repr__QscC`s|jS(N(R)(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pytpublicTscC`s|jS(N(R)(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pytbase_versionXscC`sdS(N(tNone(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR\scC`stS(N(tFalse(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt
is_prerelease`scC`stS(N(R3(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pytis_postreleaseds(RRR+R,R/tpropertyR0R1RR4R5(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyRHs			s(\d+ | [a-z]+ | \.| -)tctpreviewsfinal-t-trct@cc`sxxltj|D][}tj||}|s|dkrAqn|d dkrb|jdVqd|VqWdVdS(Nt.it
0123456789it*s*final(t_legacy_version_component_retsplitt_legacy_version_replacement_maptgettzfill(Rtpart((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyt_parse_version_partsrs
cC`sd}g}xt|jD]}|jdr|dkrjx'|rf|ddkrf|jqCWnx'|r|ddkr|jqmWn|j|qWt|}||fS(NiR>s*finals*final-t00000000(REtlowert
startswithtpoptappendttuple(RR
tpartsRD((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR*ss
    v?
    (?:
        (?:(?P[0-9]+)!)?                           # epoch
        (?P[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P
                                          # pre-release
            [-_\.]?
            (?P(a|b|c|rc|alpha|beta|pre|preview))
            [-_\.]?
            (?P[0-9]+)?
        )?
        (?P                                         # post release
            (?:-(?P[0-9]+))
            |
            (?:
                [-_\.]?
                (?Ppost|rev|r)
                [-_\.]?
                (?P[0-9]+)?
            )
        )?
        (?P                                          # dev release
            [-_\.]?
            (?Pdev)
            [-_\.]?
            (?P[0-9]+)?
        )?
    )
    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
cB`seZejdedejejBZdZdZ	dZ
edZedZ
edZedZed	ZRS(
s^\s*s\s*$cC`s[|jj|}|s0tdj|ntd|jdrZt|jdnddtd|jdjdDdt	|jd|jd	d
t	|jd|jdp|jd
dt	|jd|jddt
|jd|_t|jj
|jj|jj|jj|jj|jj|_dS(NsInvalid version: '{0}'R
iRcs`s|]}t|VqdS(N(tint(t.0ti((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	sR<R
tpre_ltpre_nRtpost_ltpost_n1tpost_n2Rtdev_ltdev_nR(t_regextsearchRR-R	tgroupRMRKR@t_parse_letter_versiont_parse_local_versionR)t_cmpkeyR
RR
RRRR(RRtmatch((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR+s.*(!					cC`sdjtt|S(Ns(R-R.R((R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR/scC`sSg}|jjdkr7|jdj|jjn|jdjd|jjD|jjdk	r|jdjd|jjDn|jjdk	r|jdj|jjdn|jj	dk	r|jd	j|jj	dn|jj
dk	rF|jd
jdjd|jj
Dndj|S(Nis{0}!R<cs`s|]}t|VqdS(N(R((RNtx((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	sRcs`s|]}t|VqdS(N(R((RNR^((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	ss.post{0}is.dev{0}s+{0}cs`s|]}t|VqdS(N(R((RNR^((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	s(R)R
RJR-tjoinRR
R2RRR(RRL((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR,s&)##,cC`st|jdddS(Nt+ii(R(R@(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR0
scC`sjg}|jjdkr7|jdj|jjn|jdjd|jjDdj|S(Nis{0}!R<cs`s|]}t|VqdS(N(R((RNR^((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	sR(R)R
RJR-R_R(RRL((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR1s
&cC`s0t|}d|kr,|jdddSdS(NR`i(R(R@(Rtversion_string((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyRscC`st|jjp|jjS(N(tboolR)RR
(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR4!scC`st|jjS(N(RbR)R(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR5%s(RRtretcompileRtVERBOSEt
IGNORECASERWR+R/R,R6R0R1RR4R5(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyRs	#		
cC`s|r|dkrd}n|j}|dkr<d}n?|dkrQd}n*|d
krfd	}n|dkr{d}n|t|fS|r|rd}|t|fSdS(NitalphatatbetatbR7R
R8R:trevtrR(R7R
R8(RkRl(R2RGRM(tlettertnumber((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyRZ*s 					
s[\._-]cC`s-|dk	r)tdtj|DSdS(sR
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    cs`s3|])}|js!|jn	t|VqdS(N(tisdigitRGRM(RNRD((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	RsN(R2RKt_local_version_seperatorsR@(R((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR[LscC`sttttjdt|}|dkr[|dkr[|dk	r[t}n|dkrpt}n|dkrt}n|dkrt}n|dkrt}ntd|D}||||||fS(NcS`s
|dkS(Ni((R^((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR`Rcs`s7|]-}t|tr$|dfn
t|fVqdS(RN(R%RMR(RNRO((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pys	s(RKtreversedtlistt	itertoolst	dropwhileR2R(R
RR
RRR((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyR\Ws&	$
	
	
	
(t
__future__RRRtcollectionsRsRct_structuresRt__all__t
namedtupleR	Rt
ValueErrorRtobjectRRRdReR?RARER*RRRZRpR[R\(((sA/usr/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyts0	!&		9k		PKe[Y__init__.pycnu[
abc@@sKdZddlmZddlZddlZddlZeZej	j
ej	jeZ
dZerGejej	je
dej	ej	(edededed	ed
ededed
ededededededededededededededededededed ed!ed"ed#ed$ed%ed&ed'ed(ed)ed*ed+ed,ed-ed.ed/ed0ndS(1s
pip._vendor is for vendoring dependencies of pip to prevent needing pip to
depend on something external.

Files inside of pip._vendor should be considered immutable and should only be
updated to versions from upstream.
i(tabsolute_importNcC@sdjt|}y t|ttddWntk
ry t|ttddWntk
ruqXtj|tj|<|jdd\}}t	tj||tj|nXdS(Ns{0}.{1}tlevelit.i(
tformatt__name__t
__import__tglobalstlocalstImportErrortsystmodulestrsplittsetattr(t
modulenamet
vendored_nametbasethead((s8/usr/lib/python2.7/site-packages/pip/_vendor/__init__.pytvendoreds 
 
	s*.whltcachecontroltcoloramatdistlibtdistrothtml5libtlockfiletsixs	six.movesssix.moves.urllibt	packagingspackaging.versionspackaging.specifierst
pkg_resourcestprogresstretryingtrequestssrequests.packagessrequests.packages.urllib3s&requests.packages.urllib3._collectionss$requests.packages.urllib3.connections(requests.packages.urllib3.connectionpools!requests.packages.urllib3.contribs*requests.packages.urllib3.contrib.ntlmpools+requests.packages.urllib3.contrib.pyopenssls$requests.packages.urllib3.exceptionss requests.packages.urllib3.fieldss"requests.packages.urllib3.fileposts"requests.packages.urllib3.packagess/requests.packages.urllib3.packages.ordered_dicts&requests.packages.urllib3.packages.sixs5requests.packages.urllib3.packages.ssl_match_hostnamesErequests.packages.urllib3.packages.ssl_match_hostname._implementations%requests.packages.urllib3.poolmanagers!requests.packages.urllib3.requests"requests.packages.urllib3.responsesrequests.packages.urllib3.utils)requests.packages.urllib3.util.connections&requests.packages.urllib3.util.requests'requests.packages.urllib3.util.responses$requests.packages.urllib3.util.retrys#requests.packages.urllib3.util.ssl_s&requests.packages.urllib3.util.timeouts"requests.packages.urllib3.util.url(t__doc__t
__future__Rtglobtos.pathtosR	tFalset	DEBUNDLEDtpathtabspathtdirnamet__file__t	WHEEL_DIRRtjoin(((s8/usr/lib/python2.7/site-packages/pip/_vendor/__init__.pytsh	)









































PKe[,؂؂
pyparsing.pyonu[
abci@sdZdZdZdZddlZddlmZddlZddl	Z	ddl
Z
ddlZddlZddl
Z
ddlZddlZddlZddlmZyddlmZWn!ek
rddlmZnXydd	l
mZWn?ek
r=ydd	lmZWnek
r9eZnXnXd
ddd
ddddddddddddddddddd d!d"d#d$d%d&d'd(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDdEdFdGdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcdddedfdgdhdidjdkdldmdndodpdqdrgiZee	jds ZedtdskZere	jZ e!Z"e#Z$e!Z%e&e'e(e)e*ee+e,e-e.e/gZ0nre	j1Z e2Z3duZ%gZ0ddl4Z4xEdvj5D]7Z6ye0j7e8e4e6Wne9k
rZq$nXq$We:dwe3dxDZ;dyZ<dze=fd{YZ>ej?ej@ZAd|ZBeBd}ZCeAeBZDe#d~ZEdjFdejGDZHd!eIfdYZJd#eJfdYZKd%eJfdYZLd'eLfdYZMd*eIfdYZNde=fdYZOd&e=fdYZPe
jQjRePdZSdZTdZUdZVdZWdZXdZYddZZd(e=fdYZ[d0e[fdYZ\de\fdYZ]de\fdYZ^de\fdYZ_e_Z`e_e[_ade\fdYZbde_fdYZcdebfdYZddpe\fdYZed3e\fdYZfd+e\fdYZgd)e\fdYZhd
e\fdYZid2e\fdYZjde\fdYZkdekfdYZldekfdYZmdekfdYZnd.ekfdYZod-ekfdYZpd5ekfdYZqd4ekfdYZrd$e[fdYZsd
esfdYZtd esfdYZudesfdYZvdesfdYZwd"e[fdYZxdexfdYZydexfdYZzdexfdYZ{de{fdYZ|d6e{fdYZ}de=fdYZ~e~ZdexfdYZd,exfdYZdexfdYZdefdYZd1exfdYZdefdYZdefdYZdefdYZd/efdYZde=fdYZdZdedZedZdZdZdZdZeedZdZedZdZdZe]jdGZemjdMZenjdLZeojdeZepjddZefeEdddjdZegdjdZegdjdZeeBeBefeHddddxBegdejBZeeedeZe_dedjdee|eeBjddZdZdZdZdZdZedZedZdZdZdZdZe=e_ddZe>Ze=e_e=e_ededdZeZeegddjdZeegddjdZeegddegddBjdZee`dejjdZddeejdZedZedZedZeefeAeDdjd\ZZeedj5dZegddjFejdjdZdZeegddjdZegdjdZegd	jjd
ZegdjdZeegddeBjd
ZeZegdjdZee|efeHddeefde_denjjdZeeejeBddjd>ZdrfdYZedkrecdZecdZefeAeDdZeeddejeZeeejdZdeBZeeddejeZeeejdZededeedZejdejjdejjdejjd ddlZejjeejejjd!ndS("sS
pyparsing module - Classes and methods to define and execute parsing grammars

The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.

Here is a program to parse "Hello, World!" (or any greeting of the form 
C{", !"}), built up using L{Word}, L{Literal}, and L{And} elements 
(L{'+'} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::

    from pyparsing import Word, alphas

    # define grammar of a greeting
    greet = Word(alphas) + "," + Word(alphas) + "!"

    hello = "Hello, World!"
    print (hello, "->", greet.parseString(hello))

The program outputs the following::

    Hello, World! -> ['Hello', ',', 'World', '!']

The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.

The L{ParseResults} object returned from L{ParserElement.parseString} can be accessed as a nested list, a dictionary, or an
object with named attributes.

The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
 - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)
 - quoted strings
 - embedded comments
s2.1.10s07 Oct 2016 01:31 UTCs*Paul McGuire iN(tref(tdatetime(tRLock(tOrderedDicttAndtCaselessKeywordtCaselessLiteralt
CharsNotIntCombinetDicttEachtEmptyt
FollowedBytForwardt
GoToColumntGrouptKeywordtLineEndt	LineStarttLiteralt
MatchFirsttNoMatchtNotAnyt	OneOrMoretOnlyOncetOptionaltOrtParseBaseExceptiontParseElementEnhancetParseExceptiontParseExpressiontParseFatalExceptiontParseResultstParseSyntaxExceptiont
ParserElementtQuotedStringtRecursiveGrammarExceptiontRegextSkipTot	StringEndtStringStarttSuppresstTokentTokenConvertertWhitetWordtWordEndt	WordStartt
ZeroOrMoret	alphanumstalphast
alphas8bittanyCloseTagt
anyOpenTagt
cStyleCommenttcoltcommaSeparatedListtcommonHTMLEntitytcountedArraytcppStyleCommenttdblQuotedStringtdblSlashCommentt
delimitedListtdictOftdowncaseTokenstemptythexnumsthtmlCommenttjavaStyleCommenttlinetlineEndt	lineStarttlinenotmakeHTMLTagstmakeXMLTagstmatchOnlyAtColtmatchPreviousExprtmatchPreviousLiteralt
nestedExprtnullDebugActiontnumstoneOftopAssoctoperatorPrecedencet
printablestpunc8bittpythonStyleCommenttquotedStringtremoveQuotestreplaceHTMLEntitytreplaceWitht
restOfLinetsglQuotedStringtsranget	stringEndtstringStartttraceParseActiont
unicodeStringtupcaseTokenst
withAttributet
indentedBlocktoriginalTextFortungroupt
infixNotationtlocatedExprt	withClasst
CloseMatchttokenMaptpyparsing_commoniicCs}t|tr|Syt|SWnUtk
rxt|jtjd}td}|jd|j	|SXdS(sDrop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
           then < returns the unicode object | encodes it with the default encoding | ... >.
        txmlcharrefreplaces&#\d+;cSs#dtt|ddd!dS(Ns\uiii(thextint(tt((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyttN(
t
isinstancetunicodetstrtUnicodeEncodeErrortencodetsystgetdefaultencodingR%tsetParseActionttransformString(tobjtrett
xmlcharref((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_ustrs
s6sum len sorted reversed list tuple set any all min maxccs|]}|VqdS(N((t.0ty((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	sicCsRd}ddjD}x/t||D]\}}|j||}q,W|S(s/Escape &, <, >, ", ', etc. in a string of data.s&><"'css|]}d|dVqdS(t&t;N((Rts((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	ssamp gt lt quot apos(tsplittziptreplace(tdatatfrom_symbolst
to_symbolstfrom_tto_((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_xml_escapes
t
_ConstantscBseZRS((t__name__t
__module__(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRst
0123456789tABCDEFabcdefi\Rrccs$|]}|tjkr|VqdS(N(tstringt
whitespace(Rtc((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	scBs_eZdZdd
d
dZedZdZdZdZ	ddZ
d	ZRS(s7base exception class for all parsing runtime exceptionsicCs[||_|dkr*||_d|_n||_||_||_|||f|_dS(NRr(tloctNonetmsgtpstrt
parserElementtargs(tselfRRRtelem((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__init__s					cCs||j|j|j|jS(s
        internal factory method to simplify creating one type of ParseException 
        from another - avoids having __init__ signature conflicts among subclasses
        (RRRR(tclstpe((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_from_exceptionscCsm|dkrt|j|jS|dkr>t|j|jS|dkr]t|j|jSt|dS(ssupported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        RHR7tcolumnREN(R7R(RHRRR7REtAttributeError(Rtaname((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__getattr__scCs d|j|j|j|jfS(Ns"%s (at char %d), (line:%d, col:%d)(RRRHR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__str__scCs
t|S(N(R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__repr__ss>!} ('-' operator) indicates that parsing is to stop 
       immediately because an unbacktrackable syntax error has been found(RRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR!scBs eZdZdZdZRS(sZexception thrown by L{ParserElement.validate} if the grammar could be improperly recursivecCs
||_dS(N(tparseElementTrace(RtparseElementList((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCsd|jS(NsRecursiveGrammarException: %s(R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR s(RRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR$s	t_ParseResultsWithOffsetcBs,eZdZdZdZdZRS(cCs||f|_dS(N(ttup(Rtp1tp2((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR$scCs|j|S(N(R(Rti((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__getitem__&scCst|jdS(Ni(treprR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR(scCs|jd|f|_dS(Ni(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt	setOffset*s(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR#s			cBseZdZd-d-eedZd-d-eeedZdZedZ	dZ
dZdZdZ
e
Zd	Zd
ZdZdZd
ZereZeZeZn-eZeZeZdZdZdZdZdZd-dZdZdZdZ dZ!dZ"dZ#dZ$dZ%dZ&dZ'ddZ(d Z)d!Z*d"Z+d-e,ded#Z-d$Z.d%Z/dd&ed'Z0d(Z1d)Z2d*Z3d+Z4d,Z5RS(.sI
    Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (C{len(results)})
       - by list index (C{results[0], results[1]}, etc.)
       - by attribute (C{results.} - see L{ParserElement.setResultsName})

    Example::
        integer = Word(nums)
        date_str = (integer.setResultsName("year") + '/' 
                        + integer.setResultsName("month") + '/' 
                        + integer.setResultsName("day"))
        # equivalent form:
        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

        # parseString returns a ParseResults object
        result = date_str.parseString("1999/12/31")

        def test(s, fn=repr):
            print("%s -> %s" % (s, fn(eval(s))))
        test("list(result)")
        test("result[0]")
        test("result['month']")
        test("result.day")
        test("'month' in result")
        test("'minutes' in result")
        test("result.dump()", str)
    prints::
        list(result) -> ['1999', '/', '12', '/', '31']
        result[0] -> '1999'
        result['month'] -> '12'
        result.day -> '31'
        'month' in result -> True
        'minutes' in result -> False
        result.dump() -> ['1999', '/', '12', '/', '31']
        - day: 31
        - month: 12
        - year: 1999
    cCs/t||r|Stj|}t|_|S(N(Rstobjectt__new__tTruet_ParseResults__doinit(RttoklisttnametasListtmodaltretobj((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRTs
	cCs|jrt|_d|_d|_i|_||_||_|dkrTg}n||trp||_	n-||t
rt||_	n|g|_	t|_n|dk	r|r|sd|j|s(R(R((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_itervaluesscsfdjDS(Nc3s|]}||fVqdS(N((RR(R(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	s(R(R((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt
_iteritemsscCst|jS(sVReturns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).(RR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytkeysscCst|jS(sXReturns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).(Rt
itervalues(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytvaluesscCst|jS(sfReturns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).(Rt	iteritems(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCs
t|jS(sSince keys() returns an iterator, this method is helpful in bypassing
           code that looks for the existence of any defined results names.(tboolR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pythaskeysscOs|sdg}nxI|jD];\}}|dkrJ|d|f}qtd|qWt|dtst|dks|d|kr|d}||}||=|S|d}|SdS(s
        Removes and returns item at specified index (default=C{last}).
        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
        argument or an integer argument, it will use C{list} semantics
        and pop tokens from the list of parsed tokens. If passed a 
        non-integer argument (most likely a string), it will use C{dict}
        semantics and pop the corresponding value from any defined 
        results names. A second default return value argument is 
        supported, just as in C{dict.pop()}.

        Example::
            def remove_first(tokens):
                tokens.pop(0)
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']

            label = Word(alphas)
            patt = label("LABEL") + OneOrMore(Word(nums))
            print(patt.parseString("AAB 123 321").dump())

            # Use pop() in a parse action to remove named result (note that corresponding value is not
            # removed from list form of results)
            def remove_LABEL(tokens):
                tokens.pop("LABEL")
                return tokens
            patt.addParseAction(remove_LABEL)
            print(patt.parseString("AAB 123 321").dump())
        prints::
            ['AAB', '123', '321']
            - LABEL: AAB

            ['AAB', '123', '321']
        itdefaultis-pop() got an unexpected keyword argument '%s'iN(RRRsRoR(RRtkwargsRRtindexR}tdefaultvalue((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytpops"


cCs||kr||S|SdS(si
        Returns named result matching the given key, or if there is no
        such name, then returns the given C{defaultValue} or C{None} if no
        C{defaultValue} is specified.

        Similar to C{dict.get()}.
        
        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            result = date_str.parseString("1999/12/31")
            print(result.get("year")) # -> '1999'
            print(result.get("hour", "not specified")) # -> 'not specified'
            print(result.get("hour")) # -> None
        N((RtkeytdefaultValue((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCsw|jj||x]|jjD]L\}}x=t|D]/\}\}}t||||k|| ['0', '123', '321']

            # use a parse action to insert the parse location in the front of the parsed results
            def insert_locn(locn, tokens):
                tokens.insert(0, locn)
            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
        N(RtinsertRRRR(RRtinsStrRRRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR2scCs|jj|dS(s
        Add single element to end of ParseResults list of elements.

        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            
            # use a parse action to compute the sum of the parsed integers, and add it to the end
            def append_sum(tokens):
                tokens.append(sum(map(int, tokens)))
            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
        N(Rtappend(Rtitem((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRFscCs0t|tr||7}n|jj|dS(s
        Add sequence of elements to end of ParseResults list of elements.

        Example::
            patt = OneOrMore(Word(alphas))
            
            # use a parse action to append the reverse of the matched strings, to make a palindrome
            def make_palindrome(tokens):
                tokens.extend(reversed([t[::-1] for t in tokens]))
                return ''.join(tokens)
            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
        N(RsR Rtextend(Rtitemseq((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRTs

cCs|j2|jjdS(s7
        Clear all elements and results names.
        N(RRtclear(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRfscCsy||SWntk
r dSX||jkr}||jkrR|j|ddStg|j|D]}|d^qcSndSdS(NRrii(RRRR (RRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRms
+cCs|j}||7}|S(N(R(RtotherR}((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__add__{s
c	s|jrt|jfd}|jj}g|D]<\}}|D])}|t|d||df^qMq=}xJ|D]?\}}|||st](RR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRsRrcCsog}xb|jD]W}|r2|r2|j|nt|trT||j7}q|jt|qW|S(N(RRRsR t
_asStringListR(RtseptoutR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCs5g|jD]'}t|tr+|jn|^q
S(s
        Returns the parse results as a nested list of matching tokens, all converted to strings.

        Example::
            patt = OneOrMore(Word(alphas))
            result = patt.parseString("sldkj lsdkj sldkj")
            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
            print(type(result), result) # ->  ['sldkj', 'lsdkj', 'sldkj']
            
            # Use asList() to create an actual list
            result_list = result.asList()
            print(type(result_list), result_list) # ->  ['sldkj', 'lsdkj', 'sldkj']
        (RRsR R(Rtres((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscsGtr|j}n	|j}fdtfd|DS(s
        Returns the named parse results as a nested dictionary.

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            
            result = date_str.parseString('12/31/1999')
            print(type(result), repr(result)) # ->  (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
            
            result_dict = result.asDict()
            print(type(result_dict), repr(result_dict)) # ->  {'day': '1999', 'year': '12', 'month': '31'}

            # even though a ParseResults supports dict-like access, sometime you just need to have a dict
            import json
            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
        csMt|trE|jr%|jSg|D]}|^q,Sn|SdS(N(RsR RtasDict(R|R(ttoItem(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs

 c3s'|]\}}||fVqdS(N((RRR(R(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	s(tPY_3RRR(Rtitem_fn((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
		cCsPt|j}|jj|_|j|_|jj|j|j|_|S(sA
        Returns a new copy of a C{ParseResults} object.
        (R RRRRRR
R(RR}((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCsd}g}td|jjD}|d}|sPd}d}d}nd	}	|d	k	rk|}	n|jr|j}	n|	s|rdSd}	n|||d|	dg7}x	t|jD]\}
}t|trI|
|kr||j	||
|o|d	k||g7}q||j	d	|o6|d	k||g7}qd	}|
|krh||
}n|s|rzqqd}nt
t|}
|||d|d|
d|dg	7}qW|||d|	dg7}dj|S(
s
        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
        s
css2|](\}}|D]}|d|fVqqdS(iN((RRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	s	s  RrtITEMtsgss
%s%s- %s: s  icss|]}t|tVqdS(N(RsR (Rtvv((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	sss
%s%s[%d]:
%s%s%sRr(
RRRRtsortedRRsR tdumpRtanyRR(RR$tdepthtfullRtNLRRRRR1((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR3Ps, B?cOstj|j||dS(s
        Pretty-printer for parsed results as a list, using the C{pprint} module.
        Accepts additional positional or keyword args as defined for the 
        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})

        Example::
            ident = Word(alphas, alphanums)
            num = Word(nums)
            func = Forward()
            term = ident | num | Group('(' + func + ')')
            func <<= ident + Group(Optional(delimitedList(term)))
            result = func.parseString("fna a,b,(fnb c,d,200),100")
            result.pprint(width=40)
        prints::
            ['fna',
             ['a',
              'b',
              ['(', 'fnb', ['c', 'd', '200'], ')'],
              '100']]
        N(tpprintR(RRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR8}scCsC|j|jj|jdk	r-|jp0d|j|jffS(N(RRRRRRR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__getstate__s
cCsm|d|_|d\|_}}|_i|_|jj||dk	r`t||_n	d|_dS(Nii(RRRRR
RRR(RtstateR/tinAccumNames((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__setstate__s
	cCs|j|j|j|jfS(N(RRRR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__getnewargs__scCs tt|t|jS(N(RRRR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRsN(6RRRRRRRsRRRRRRRt__nonzero__RRRRRRRRRRRRRRRRRRRRR
RRRRRRRRRR!R-R0R3R8R9R<R=R(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR -sh&	'		
														4												#	=		%-			
	cCsW|}d|ko#t|knr@||ddkr@dS||jdd|S(sReturns current column within a string, counting newlines as line separators.
   The first column is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}} for more information
   on parsing strings containing C{}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   iis
(Rtrfind(RtstrgR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR7s
cCs|jdd|dS(sReturns current line number within a string, counting newlines as line separators.
   The first line is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}} for more information
   on parsing strings containing C{}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   s
ii(tcount(RR@((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRHs
cCsR|jdd|}|jd|}|dkrB||d|!S||dSdS(sfReturns the line of text containing loc within a string, counting newlines as line separators.
       s
iiN(R?tfind(RR@tlastCRtnextCR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyREs
cCsAdt|dt|dt||t||fGHdS(NsMatch s at loc s(%d,%d)(RRHR7(tinstringRtexpr((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_defaultStartDebugActionscCs'dt|dt|jGHdS(NsMatched s -> (RRuR(REtstartloctendlocRFttoks((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_defaultSuccessDebugActionscCsdt|GHdS(NsException raised:(R(RERRFtexc((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_defaultExceptionDebugActionscGsdS(sG'Do-nothing' debug action, to suppress debugging output during parsing.N((R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyROsicstkrfdSdgtgtd dkrVdd}ddntj}tjd}|d	dd
}|d|d|ffd}d
}y"tdtdj}Wntk
rt	}nX||_|S(Ncs
|S(N((RtlRp(tfunc(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRriiiicSsJtdkrdnd}tjd||d|}|j|jfgS(	Niiiiitlimiti(iii(tsystem_versiont	tracebackt
extract_stacktfilenameRH(RPRt
frame_summary((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRSscSs2tj|d|}|d}|j|jfgS(NRPi(RRt
extract_tbRTRH(ttbRPtframesRU((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRVs
iRPiicsxy&|d}td<|SWqtk
rdrInAz:tjd}|dddd ksnWd~Xdkrdcd7Rt	__class__(ii(
tsingleArgBuiltinsRRQRRRSRVtgetattrRt	ExceptionRu(ROR[RSt	LINE_DIFFt	this_lineR]t	func_name((RVRZRORPR[R\s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_trim_aritys*
					
	cBseZdZdZeZedZedZedZ	dZ
dZedZe
dZd	Zd
ZdZdZd
ZdZe
dZdZe
e
dZdZdZdefdYZedFk	rdefdYZndefdYZiZe Z!ddgZ"e
e
dZ#eZ$edZ%eZ&eddZ'edZ(e)edZ*d Z+e)d!Z,e)ed"Z-d#Z.d$Z/d%Z0d&Z1d'Z2d(Z3d)Z4d*Z5d+Z6d,Z7d-Z8d.Z9d/Z:dFd0Z;d1Z<d2Z=d3Z>d4Z?d5Z@d6ZAe
d7ZBd8ZCd9ZDd:ZEd;ZFgd<ZGed=ZHd>ZId?ZJd@ZKdAZLdBZMe
dCZNe
dDe
e
edEZORS(Gs)Abstract base level parser element class.s 
	
cCs
|t_dS(s
        Overrides the default whitespace chars

        Example::
            # default whitespace chars are space,  and newline
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
            
            # change to just treat newline as significant
            ParserElement.setDefaultWhitespaceChars(" \t")
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
        N(R"tDEFAULT_WHITE_CHARS(tchars((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetDefaultWhitespaceChars=s
cCs
|t_dS(s
        Set class to be used for inclusion of string literals into a parser.
        
        Example::
            # default literal class used is Literal
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']


            # change to Suppress
            ParserElement.inlineLiteralsUsing(Suppress)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
        N(R"t_literalStringClass(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytinlineLiteralsUsingLscCst|_d|_d|_d|_||_t|_t	j
|_t|_t
|_t
|_t|_t
|_t
|_t|_d|_t|_d|_d|_t|_t
|_dS(NRr(NNN(RtparseActionRt
failActiontstrReprtresultsNamet
saveAsListRtskipWhitespaceR"Rft
whiteCharstcopyDefaultWhiteCharsRtmayReturnEmptytkeepTabstignoreExprstdebugtstreamlinedt
mayIndexErrorterrmsgtmodalResultstdebugActionstretcallPreparset
callDuringTry(Rtsavelist((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRas(																cCsEtj|}|j|_|j|_|jrAtj|_n|S(s$
        Make a copy of this C{ParserElement}.  Useful for defining different parse actions
        for the same parsing pattern, using copies of the original parse element.
        
        Example::
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
            
            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
        prints::
            [5120, 100, 655360, 268435456]
        Equivalent form of C{expr.copy()} is just C{expr()}::
            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
        (RRkRuRrR"RfRq(Rtcpy((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRxs

	cCs>||_d|j|_t|dr:|j|j_n|S(sf
        Define name for this expression, makes debugging and exception messages clearer.
        
        Example::
            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
        s	Expected t	exception(RRyRRR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetNames
	cCsE|j}|jdr.|d }t}n||_||_|S(sP
        Define name for referencing matching tokens as a nested attribute
        of the returned parse results.
        NOTE: this returns a *copy* of the original C{ParserElement} object;
        this is so that the client can define a basic element, such as an
        integer, and reference it in multiple places with different names.

        You can also set results names using the abbreviated syntax,
        C{expr("name")} in place of C{expr.setResultsName("name")} - 
        see L{I{__call__}<__call__>}.

        Example::
            date_str = (integer.setResultsName("year") + '/' 
                        + integer.setResultsName("month") + '/' 
                        + integer.setResultsName("day"))

            # equivalent form:
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
        t*i(RtendswithRRnRz(RRtlistAllMatchestnewself((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetResultsNames
		
csa|r9|jttfd}|_||_n$t|jdr]|jj|_n|S(sMethod to invoke the Python pdb debugger when this element is
           about to be parsed. Set C{breakFlag} to True to enable, False to
           disable.
        cs)ddl}|j||||S(Ni(tpdbt	set_trace(RERt	doActionstcallPreParseR(t_parseMethod(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytbreakers
t_originalParseMethod(t_parseRRR(Rt	breakFlagR((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetBreaks		cOs7tttt||_|jdt|_|S(s
        Define action to perform when successfully matching parse element definition.
        Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
        C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
         - s   = the original string being parsed (see note below)
         - loc = the location of the matching substring
         - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
        If the functions in fns modify the tokens, they can return them as the return
        value from fn, and the modified list of tokens will replace the original.
        Otherwise, fn does not need to return any value.

        Optional keyword arguments:
         - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing

        Note: the default parsing behavior is to expand tabs in the input string
        before starting the parsing process.  See L{I{parseString}} for more information
        on parsing strings containing C{}s, and suggested methods to maintain a
        consistent view of the parsed string, the parse location, and line and column
        positions within the parsed string.
        
        Example::
            integer = Word(nums)
            date_str = integer + '/' + integer + '/' + integer

            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']

            # use parse action to convert to ints at parse time
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            date_str = integer + '/' + integer + '/' + integer

            # note that integer fields are now ints, not strings
            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
        R~(RtmapReRkRRR~(RtfnsR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRzs"cOsF|jtttt|7_|jp<|jdt|_|S(s
        Add parse action to expression's list of parse actions. See L{I{setParseAction}}.
        
        See examples in L{I{copy}}.
        R~(RkRRReR~RR(RRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytaddParseActions$cs|jdd|jdtr*tntx3|D]+fd}|jj|q7W|jp~|jdt|_|S(sAdd a boolean predicate function to expression's list of parse actions. See 
        L{I{setParseAction}} for function call signatures. Unlike C{setParseAction}, 
        functions passed to C{addCondition} need to return boolean success/fail of the condition.

        Optional keyword arguments:
         - message = define a custom message to be used in the raised exception
         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
         
        Example::
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            year_int = integer.copy()
            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
            date_str = year_int + '/' + integer + '/' + integer

            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
        tmessagesfailed user-defined conditiontfatalcs7tt|||s3||ndS(N(RRe(RRNRp(texc_typetfnR(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytpasR~(RRRRRkRR~(RRRR((RRRs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytaddConditions
cCs
||_|S(sDefine action to perform if parsing fails at this expression.
           Fail acton fn is a callable function that takes the arguments
           C{fn(s,loc,expr,err)} where:
            - s = string being parsed
            - loc = location where expression match was attempted and failed
            - expr = the parse expression that failed
            - err = the exception thrown
           The function returns no value.  It may throw C{L{ParseFatalException}}
           if it is desired to stop parsing immediately.(Rl(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt
setFailActions
	cCsnt}xa|rit}xN|jD]C}y)x"|j||\}}t}q+WWqtk
raqXqWq	W|S(N(RRRuRR(RRERt
exprsFoundtetdummy((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt_skipIgnorables#s	
cCsp|jr|j||}n|jrl|j}t|}x-||krh|||krh|d7}q?Wn|S(Ni(RuRRpRqR(RRERtwttinstrlen((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytpreParse0s			cCs
|gfS(N((RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt	parseImpl<scCs|S(N((RRERt	tokenlist((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt	postParse?sc	Cs|j}|s|jr,|jdr?|jd|||n|rc|jrc|j||}n|}|}yUy|j|||\}}Wn/tk
rt|t||j	|nXWqt
k
r(}	|jdr|jd||||	n|jr"|j||||	nqXn|rP|jrP|j||}n|}|}|jsw|t|kry|j|||\}}Wqtk
rt|t||j	|qXn|j|||\}}|j|||}t
||jd|jd|j}
|jrf|s7|jrf|ryrxk|jD]`}||||
}|dk	rJt
||jd|jot|t
tfd|j}
qJqJWWqct
k
r}	|jdr|jd||||	nqcXqfxn|jD]`}||||
}|dk	rt
||jd|joMt|t
tfd|j}
qqWn|r|jdr|jd|||||
qn||
fS(NiiRRi(RvRlR{R}RRRRRRyRRxRR RnRoRzRkR~RRsR(RRERRRt	debuggingtprelocttokensStartttokensterrt	retTokensR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt
_parseNoCacheCsp	

&
	

%$	

	
#cCsNy|j||dtdSWn)tk
rIt|||j|nXdS(NRi(RRRRRy(RRER((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyttryParses
cCs7y|j||Wnttfk
r.tSXtSdS(N(RRRRR(RRER((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytcanParseNexts
t_UnboundedCachecBseZdZRS(csit|_fd}fd}fd}tj|||_tj|||_tj|||_dS(Ncsj|S(N(R(RR(tcachetnot_in_cache(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscs||})
         - define your parse action using the full C{(s,loc,toks)} signature, and
           reference the input string using the parse action's C{s} argument
         - explictly expand the tabs in your input string before calling
           C{parseString}
        
        Example::
            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
        iN(
R"RRwt
streamlineRuRtt
expandtabsRRRR'Rtverbose_stacktrace(RREtparseAllRRRtseRL((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytparseString#s$
	
		
ccs|js|jnx|jD]}|jq W|jsRt|j}nt|}d}|j}|j}t	j
d}	yx||kra|	|kray.|||}
|||
dt\}}Wntk
r|
d}qX||krT|	d7}	||
|fV|rK|||}
|
|kr>|}qQ|d7}q^|}q|
d}qWWn(t
k
r}t	jrq|nXdS(s
        Scan the input string for expression matches.  Each match will return the
        matching tokens, start location, and end location.  May be called with optional
        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
        C{overlap} is specified, then overlapping matches will be reported.

        Note that the start and end locations are reported relative to the string
        being parsed.  See L{I{parseString}} for more information on parsing
        strings with embedded tabs.

        Example::
            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
            print(source)
            for tokens,start,end in Word(alphas).scanString(source):
                print(' '*start + '^'*(end-start))
                print(' '*start + tokens[0])
        
        prints::
        
            sldjf123lsdjjkf345sldkjf879lkjsfd987
            ^^^^^
            sldjf
                    ^^^^^^^
                    lsdjjkf
                              ^^^^^^
                              sldkjf
                                       ^^^^^^
                                       lkjsfd
        iRiN(RwRRuRtRRRRRR"RRRRR(RREt
maxMatchestoverlapRRRt
preparseFntparseFntmatchesRtnextLocRtnextlocRL((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt
scanStringUsB	
			


	
		c	Cs%g}d}t|_yx|j|D]}\}}}|j|||!|rt|trs||j7}qt|tr||7}q|j|n|}q(W|j||g|D]}|r|^q}djt	t
t|SWn(tk
r }t
jrq!|nXdS(sf
        Extension to C{L{scanString}}, to modify matching text with modified tokens that may
        be returned from a parse action.  To use C{transformString}, define a grammar and
        attach a parse action to it that modifies the returned token list.
        Invoking C{transformString()} on a target string will then scan for matches,
        and replace the matched text patterns according to the logic in the parse
        action.  C{transformString()} returns the resulting transformed string.
        
        Example::
            wd = Word(alphas)
            wd.setParseAction(lambda toks: toks[0].title())
            
            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
        Prints::
            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
        iRrN(RRtRRRsR RRRRRt_flattenRR"R(	RRERtlastERpRRtoRL((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR{s(	

 	cCsey6tg|j||D]\}}}|^qSWn(tk
r`}tjrWqa|nXdS(s~
        Another extension to C{L{scanString}}, simplifying the access to the tokens found
        to match the given parse expression.  May be called with optional
        C{maxMatches} argument, to clip searching after 'n' matches are found.
        
        Example::
            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
            cap_word = Word(alphas.upper(), alphas.lower())
            
            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
        prints::
            ['More', 'Iron', 'Lead', 'Gold', 'I']
        N(R RRR"R(RRERRpRRRL((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsearchStrings6	c	csfd}d}xJ|j|d|D]3\}}}|||!V|rO|dVn|}q"W||VdS(s[
        Generator method to split a string using the given expression as a separator.
        May be called with optional C{maxsplit} argument, to limit the number of splits;
        and the optional C{includeSeparators} argument (default=C{False}), if the separating
        matching text should be included in the split results.
        
        Example::        
            punc = oneOf(list(".,;:/-!?"))
            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
        prints::
            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
        iRN(R(	RREtmaxsplittincludeSeparatorstsplitstlastRpRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
%
cCsdt|tr!tj|}nt|tsTtjdt|tdddSt	||gS(s
        Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
        converts them to L{Literal}s by default.
        
        Example::
            greet = Word(alphas) + "," + Word(alphas) + "!"
            hello = "Hello, World!"
            print (hello, "->", greet.parseString(hello))
        Prints::
            Hello, World! -> ['Hello', ',', 'World', '!']
        s4Cannot combine element of type %s with ParserElementt
stackleveliN(
RsRR"RitwarningstwarnRt
SyntaxWarningRR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
s
cCs\t|tr!tj|}nt|tsTtjdt|tdddS||S(s]
        Implementation of + operator when left operand is not a C{L{ParserElement}}
        s4Cannot combine element of type %s with ParserElementRiN(	RsRR"RiRRRRR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
cCsmt|tr!tj|}nt|tsTtjdt|tdddSt	|t	j
|gS(sQ
        Implementation of - operator, returns C{L{And}} with error stop
        s4Cannot combine element of type %s with ParserElementRiN(RsRR"RiRRRRRRt
_ErrorStop(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__sub__s
cCs\t|tr!tj|}nt|tsTtjdt|tdddS||S(s]
        Implementation of - operator when left operand is not a C{L{ParserElement}}
        s4Cannot combine element of type %s with ParserElementRiN(	RsRR"RiRRRRR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__rsub__ s
csEt|tr|d}}n-t|tr7|dd }|dd
kr_d|df}nt|dtr|dd
kr|ddkrtS|ddkrtS|dtSqLt|dtrt|dtr|\}}||8}qLtdt|dt|dntdt||dkrgtdn|dkrtdn||kodknrtdn|rfd	|r
|dkr|}qt	g||}qA|}n(|dkr.}nt	g|}|S(s
        Implementation of * operator, allows use of C{expr * 3} in place of
        C{expr + expr + expr}.  Expressions may also me multiplied by a 2-integer
        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
        may also include C{None} as in:
         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
              to C{expr*n + L{ZeroOrMore}(expr)}
              (read as "at least n instances of C{expr}")
         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
              (read as "0 to n instances of C{expr}")
         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}

        Note that C{expr*(None,n)} does not raise an exception if
        more than n exprs exist in the input stream; that is,
        C{expr*(None,n)} does not enforce a maximum number of expr
        occurrences.  If this behavior is desired, then write
        C{expr*(None,n) + ~expr}
        iiis7cannot multiply 'ParserElement' and ('%s','%s') objectss0cannot multiply 'ParserElement' and '%s' objectss/cannot multiply ParserElement by negative values@second tuple value must be greater or equal to first tuple values+cannot multiply ParserElement by 0 or (0,0)cs2|dkr$t|dStSdS(Ni(R(tn(tmakeOptionalListR(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR]sN(NN(
RsRottupleRR0RRRt
ValueErrorR(RR	tminElementstoptElementsR}((RRs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__mul__,sD#

&
) 	cCs
|j|S(N(R(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__rmul__pscCsdt|tr!tj|}nt|tsTtjdt|tdddSt	||gS(sI
        Implementation of | operator - returns C{L{MatchFirst}}
        s4Cannot combine element of type %s with ParserElementRiN(
RsRR"RiRRRRRR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__or__ss
cCs\t|tr!tj|}nt|tsTtjdt|tdddS||BS(s]
        Implementation of | operator when left operand is not a C{L{ParserElement}}
        s4Cannot combine element of type %s with ParserElementRiN(	RsRR"RiRRRRR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__ror__s
cCsdt|tr!tj|}nt|tsTtjdt|tdddSt	||gS(sA
        Implementation of ^ operator - returns C{L{Or}}
        s4Cannot combine element of type %s with ParserElementRiN(
RsRR"RiRRRRRR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__xor__s
cCs\t|tr!tj|}nt|tsTtjdt|tdddS||AS(s]
        Implementation of ^ operator when left operand is not a C{L{ParserElement}}
        s4Cannot combine element of type %s with ParserElementRiN(	RsRR"RiRRRRR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__rxor__s
cCsdt|tr!tj|}nt|tsTtjdt|tdddSt	||gS(sC
        Implementation of & operator - returns C{L{Each}}
        s4Cannot combine element of type %s with ParserElementRiN(
RsRR"RiRRRRRR
(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__and__s
cCs\t|tr!tj|}nt|tsTtjdt|tdddS||@S(s]
        Implementation of & operator when left operand is not a C{L{ParserElement}}
        s4Cannot combine element of type %s with ParserElementRiN(	RsRR"RiRRRRR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__rand__s
cCs
t|S(sE
        Implementation of ~ operator - returns C{L{NotAny}}
        (R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt
__invert__scCs'|dk	r|j|S|jSdS(s

        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
        
        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
        passed as C{True}.
           
        If C{name} is omitted, same as calling C{L{copy}}.

        Example::
            # these are equivalent
            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             
        N(RRR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__call__s
cCs
t|S(s
        Suppresses the output of this C{ParserElement}; useful to keep punctuation from
        cluttering up returned output.
        (R)(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsuppressscCs
t|_|S(s
        Disables the skipping of whitespace before matching the characters in the
        C{ParserElement}'s defined pattern.  This is normally only used internally by
        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
        (RRp(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytleaveWhitespaces	cCst|_||_t|_|S(s8
        Overrides the default whitespace chars
        (RRpRqRRr(RRg((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetWhitespaceCharss			cCs
t|_|S(s
        Overrides default behavior to expand C{}s to spaces before parsing the input string.
        Must be called before C{parseString} when the input grammar contains elements that
        match C{} characters.
        (RRt(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt
parseWithTabss	cCsrt|trt|}nt|trR||jkrn|jj|qnn|jjt|j|S(s
        Define expression to be ignored (e.g., comments) while doing pattern
        matching; may be called repeatedly, to define multiple comment or other
        ignorable patterns.
        
        Example::
            patt = OneOrMore(Word(alphas))
            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
            
            patt.ignore(cStyleComment)
            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
        (RsRR)RuRR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytignores
cCs1|p	t|pt|ptf|_t|_|S(sT
        Enable display of debugging messages while doing pattern matching.
        (RGRKRMR{RRv(RtstartActiont
successActiontexceptionAction((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetDebugActions
s
			cCs)|r|jtttn	t|_|S(s
        Enable display of debugging messages while doing pattern matching.
        Set C{flag} to True to enable, False to disable.

        Example::
            wd = Word(alphas).setName("alphaword")
            integer = Word(nums).setName("numword")
            term = wd | integer
            
            # turn on debugging for wd
            wd.setDebug()

            OneOrMore(term).parseString("abc 123 xyz 890")
        
        prints::
            Match alphaword at loc 0(1,1)
            Matched alphaword -> ['abc']
            Match alphaword at loc 3(1,4)
            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
            Match alphaword at loc 7(1,8)
            Matched alphaword -> ['xyz']
            Match alphaword at loc 11(1,12)
            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
            Match alphaword at loc 15(1,16)
            Exception raised:Expected alphaword (at char 15), (line:1, col:16)

        The output shown is that produced by the default debug actions - custom debug actions can be
        specified using L{setDebugActions}. Prior to attempting
        to match the C{wd} expression, the debugging message C{"Match  at loc (,)"}
        is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
        message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
        which makes debugging and exception messages easier to understand - for instance, the default
        name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
        (RRGRKRMRRv(Rtflag((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetDebugs#	cCs|jS(N(R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR@scCs
t|S(N(R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRCscCst|_d|_|S(N(RRwRRm(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRFs		cCsdS(N((RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytcheckRecursionKscCs|jgdS(sj
        Check defined expressions for valid structure, check for infinite recursive definitions.
        N(R(Rt
validateTrace((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytvalidateNscCsy|j}Wn5tk
rGt|d}|j}WdQXnXy|j||SWn(tk
r}tjr}q|nXdS(s
        Execute the parse expression on the given file or filename.
        If a filename is specified (instead of a file object),
        the entire file is opened, read, and closed before parsing.
        trN(treadRtopenRRR"R(Rtfile_or_filenameRt
file_contentstfRL((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt	parseFileTs
	cCsdt|tr1||kp0t|t|kSt|trM|j|Stt||kSdS(N(RsR"tvarsRRtsuper(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__eq__hs
"
cCs||kS(N((RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__ne__pscCstt|S(N(thashtid(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__hash__sscCs
||kS(N((RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__req__vscCs||kS(N((RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__rne__yscCs:y!|jt|d|tSWntk
r5tSXdS(s
        Method for quick testing of a parser against a test string. Good for simple 
        inline microtests of sub expressions while building up larger parser.
           
        Parameters:
         - testString - to test against this expression for a match
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
            
        Example::
            expr = Word(nums)
            assert expr.matches("100")
        RN(RRRRR(Rt
testStringR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR|s


t#cCsyt|tr6tttj|jj}nt|trTt|}ng}g}t	}	x|D]}
|dk	r|j|
ts|r|
r|j
|
qmn|
sqmndj||
g}g}yQ|
jdd}
|j|
d|}|j
|jd||	o%|}	Wntk
r}
t|
trPdnd}d|
kr|j
t|
j|
|j
dt|
j|
dd	|n|j
d|
jd	||j
d
t|
|	o|}	|
}n<tk
r*}|j
dt||	o|}	|}nX|rX|rG|j
dndj|GHn|j
|
|fqmW|	|fS(
s3
        Execute the parse expression on a series of test strings, showing each
        test, the parsed results or where the parse failed. Quick and easy way to
        run a parse expression against a list of sample strings.
           
        Parameters:
         - tests - a list of separate test strings, or a multiline string of test strings
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests           
         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test 
              string; pass None to disable comment filtering
         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
              if False, only dump nested list
         - printResults - (default=C{True}) prints test output to stdout
         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing

        Returns: a (success, results) tuple, where success indicates that all tests succeeded
        (or failed if C{failureTests} is True), and the results contain a list of lines of each 
        test's output
        
        Example::
            number_expr = pyparsing_common.number.copy()

            result = number_expr.runTests('''
                # unsigned integer
                100
                # negative integer
                -100
                # float with scientific notation
                6.02e23
                # integer with scientific notation
                1e-12
                ''')
            print("Success" if result[0] else "Failed!")

            result = number_expr.runTests('''
                # stray character
                100Z
                # missing leading digit before '.'
                -.100
                # too many '.'
                3.14.159
                ''', failureTests=True)
            print("Success" if result[0] else "Failed!")
        prints::
            # unsigned integer
            100
            [100]

            # negative integer
            -100
            [-100]

            # float with scientific notation
            6.02e23
            [6.02e+23]

            # integer with scientific notation
            1e-12
            [1e-12]

            Success
            
            # stray character
            100Z
               ^
            FAIL: Expected end of text (at char 3), (line:1, col:4)

            # missing leading digit before '.'
            -.100
            ^
            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)

            # too many '.'
            3.14.159
                ^
            FAIL: Expected end of text (at char 4), (line:1, col:5)

            Success

        Each test string must be on a single line. If you want to test a string that spans multiple
        lines, create a test like this::

            expr.runTest(r"this is a test\n of strings that spans \n 3 lines")
        
        (Note that this is a raw string literal, you must include the leading 'r'.)
        s
s\nRR6s(FATAL)Rrt it^sFAIL: sFAIL-EXCEPTION: N(RsRRRRuRtrstript
splitlinesRRRRRRRRRR3RRRERR7Ra(RttestsRtcommenttfullDumptprintResultstfailureTestst
allResultstcommentstsuccessRpRtresultRRRL((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytrunTestssNW'
+
,	
N(PRRRRfRRtstaticmethodRhRjRRRRRRRzRRRRRRRRRRRRRRRRRRRRRRRRRt_MAX_INTRR{RRR
RRRRRRRRRRRRRRRRRRRRRRRRRR	RR
RRRRR"(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR"8s			&	
		
	
		H			"2G	+					D																	
)									cBseZdZdZRS(sT
    Abstract C{ParserElement} subclass, for defining atomic matching patterns.
    cCstt|jdtdS(NR(RR*RR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR	s(RRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR*	scBseZdZdZRS(s,
    An empty token, will always match.
    cCs2tt|jd|_t|_t|_dS(NR(RRRRRRsRRx(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR	s		(RRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR	scBs#eZdZdZedZRS(s(
    A token that will never match.
    cCs;tt|jd|_t|_t|_d|_dS(NRsUnmatchable token(	RRRRRRsRRxRy(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR*	s
			cCst|||j|dS(N(RRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR1	s(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR&	s	cBs#eZdZdZedZRS(s
    Token to exactly match a specified string.
    
    Example::
        Literal('blah').parseString('blah')  # -> ['blah']
        Literal('blah').parseString('blahfooblah')  # -> ['blah']
        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
    
    For case-insensitive matching, use L{CaselessLiteral}.
    
    For keyword matching (force word break before and after the matched string),
    use L{Keyword} or L{CaselessKeyword}.
    cCstt|j||_t||_y|d|_Wn0tk
rntj	dt
ddt|_nXdt
|j|_d|j|_t|_t|_dS(Nis2null string passed to Literal; use Empty() insteadRis"%s"s	Expected (RRRtmatchRtmatchLentfirstMatchCharRRRRRR^RRRyRRsRx(RtmatchString((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRC	s	
	

	cCsg|||jkrK|jdks7|j|j|rK||j|jfSt|||j|dS(Ni(R'R&t
startswithR%RRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRV	s$(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR5	s
	cBsKeZdZedZdedZedZ	dZ
edZRS(s\
    Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
    Accepts two optional constructor arguments in addition to the keyword string:
     - C{identChars} is a string of characters that would be valid identifier characters,
          defaulting to all alphanumerics + "_" and "$"
     - C{caseless} allows case-insensitive matching, default is C{False}.
       
    Example::
        Keyword("start").parseString("start")  # -> ['start']
        Keyword("start").parseString("starting")  # -> Exception

    For case-insensitive matching, use L{CaselessKeyword}.
    s_$cCstt|j|dkr+tj}n||_t||_y|d|_Wn't	k
r}t
jdtddnXd|j|_
d|j
|_t|_t|_||_|r|j|_|j}nt||_dS(Nis2null string passed to Keyword; use Empty() insteadRis"%s"s	Expected (RRRRtDEFAULT_KEYWORD_CHARSR%RR&R'RRRRRRyRRsRxtcaselesstuppert
caselessmatchRt
identChars(RR(R.R+((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRq	s&	
				cCsb|jr||||j!j|jkrF|t||jkse|||jj|jkrF|dks||dj|jkrF||j|jfSn|||jkrF|jdks|j|j|rF|t||jks|||j|jkrF|dks2||d|jkrF||j|jfSt	|||j
|dS(Nii(R+R&R,R-RR.R%R'R)RRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR	s	#9)$3#cCs%tt|j}tj|_|S(N(RRRR*R.(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR	scCs
|t_dS(s,Overrides the default Keyword chars
        N(RR*(Rg((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytsetDefaultKeywordChars	sN(
RRRR1R*RRRRRRR#R/(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR^	s
	cBs#eZdZdZedZRS(sl
    Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.

    Example::
        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
        
    (Contrast with example for L{CaselessKeyword}.)
    cCsItt|j|j||_d|j|_d|j|_dS(Ns'%s's	Expected (RRRR,treturnStringRRy(RR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR	s	cCsS||||j!j|jkr7||j|jfSt|||j|dS(N(R&R,R%R0RRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR	s#(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR	s
	cBs&eZdZddZedZRS(s
    Caseless version of L{Keyword}.

    Example::
        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
        
    (Contrast with example for L{CaselessLiteral}.)
    cCs#tt|j||dtdS(NR+(RRRR(RR(R.((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR	scCs||||j!j|jkrp|t||jks\|||jj|jkrp||j|jfSt|||j|dS(N(R&R,R-RR.R%RRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR	s#9N(RRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR	scBs&eZdZddZedZRS(sx
    A variation on L{Literal} which matches "close" matches, that is, 
    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
     - C{match_string} - string to be matched
     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
    
    The results from a successful parse will contain the matched text from the input string and the following named results:
     - C{mismatches} - a list of the positions within the match_string where mismatches were found
     - C{original} - the original match_string used to compare against the input string
    
    If C{mismatches} is an empty list, then the match was an exact match.
    
    Example::
        patt = CloseMatch("ATCATCGAATGGA")
        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)

        # exact match
        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})

        # close match allowing up to 2 mismatches
        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
    icCs]tt|j||_||_||_d|j|jf|_t|_t|_	dS(Ns&Expected %r (with up to %d mismatches)(
RRjRRtmatch_stringt
maxMismatchesRyRRxRs(RR1R2((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR	s				cCs|}t|}|t|j}||kr|j}d}g}	|j}
xtt|||!|jD]J\}}|\}}
||
kro|	j|t|	|
krPqqoqoW|d}t|||!g}|j|d<|	|d<||fSnt|||j|dS(Niitoriginalt
mismatches(	RR1R2RRRR RRy(RRERRtstartRtmaxlocR1tmatch_stringlocR4R2ts_mtsrctmattresults((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR	s(		,




(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRj	s	cBs>eZdZddddeddZedZdZRS(s	
    Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction. An optional
    C{excludeChars} parameter can list characters that might be found in 
    the input C{bodyChars} string; useful to define a word of all printables
    except for one or two characters, for instance.
    
    L{srange} is useful for defining custom character set strings for defining 
    C{Word} expressions, using range notation from regular expression character sets.
    
    A common mistake is to use C{Word} to match a specific literal string, as in 
    C{Word("Address")}. Remember that C{Word} uses the string argument to define
    I{sets} of matchable characters. This expression would match "Add", "AAA",
    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
    To match an exact literal string, use L{Literal} or L{Keyword}.

    pyparsing includes helper strings for building Words:
     - L{alphas}
     - L{nums}
     - L{alphanums}
     - L{hexnums}
     - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
     - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
     - L{printables} (any non-whitespace character)

    Example::
        # a word composed of digits
        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
        
        # a word with a leading capital, and zero or more lowercase
        capital_word = Word(alphas.upper(), alphas.lower())

        # hostnames are alphanumeric, with leading alpha, and '-'
        hostname = Word(alphas, alphanums+'-')
        
        # roman numeral (not a strict parser, accepts invalid mix of characters)
        roman = Word("IVXLCDM")
        
        # any string of non-whitespace characters, except for ','
        csv_value = Word(printables, excludeChars=",")
    iicstt|jrcdjfd|D}|rcdjfd|D}qcn||_t||_|r||_t||_n||_t||_|dk|_	|dkrt
dn||_|dkr||_n	t
|_|dkr)||_||_nt||_d|j|_t|_||_d|j|jkr}|dkr}|dkr}|dkr}|j|jkrd	t|j|_net|jdkrd
tj|jt|jf|_n%dt|jt|jf|_|jrDd|jd|_nytj|j|_Wq}tk
ryd|_q}XndS(
NRrc3s!|]}|kr|VqdS(N((RR(texcludeChars(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	7
sc3s!|]}|kr|VqdS(N((RR(R<(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	9
siisZcannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitteds	Expected Rs[%s]+s%s[%s]*s	[%s][%s]*s\b(RR-RRt
initCharsOrigRt	initCharst
bodyCharsOrigt	bodyCharstmaxSpecifiedRtminLentmaxLenR$RRRyRRxt	asKeywordt_escapeRegexRangeCharstreStringRR|tescapetcompileRaR(RR>R@tmintmaxtexactRDR<((R<s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR4
sT%								:	
c
Cs|jr[|jj||}|s?t|||j|n|j}||jfS|||jkrt|||j|n|}|d7}t|}|j}||j	}t
||}x*||kr|||kr|d7}qWt}	|||jkrt
}	n|jrG||krG|||krGt
}	n|jr|dkrp||d|ks||kr|||krt
}	qn|	rt|||j|n||||!fS(Nii(R|R%RRytendtgroupR>RR@RCRIRRBRRARD(
RRERRR!R5Rt	bodycharsR6tthrowException((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRj
s6	
	
	%		<cCsytt|jSWntk
r*nX|jdkrd}|j|jkr}d||j||jf|_qd||j|_n|jS(NcSs&t|dkr|d dS|SdS(Nis...(R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt
charsAsStr
ss	W:(%s,%s)sW:(%s)(RR-RRaRmRR=R?(RRP((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
s
	(N(	RRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR-
s.6#cBsDeZdZeejdZddZedZ	dZ
RS(s
    Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    If the given regex contains named groups (defined using C{(?P...)}), these will be preserved as 
    named parse results.

    Example::
        realnum = Regex(r"[+-]?\d+\.\d*")
        date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)')
        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
    s[A-Z]icCs3tt|jt|tr|sAtjdtddn||_||_	y+t
j|j|j	|_
|j|_Wqt
jk
rtjd|tddqXnIt|tjr||_
t||_|_||_	ntdt||_d|j|_t|_t|_dS(sThe parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.s0null string passed to Regex; use Empty() insteadRis$invalid pattern (%s) passed to RegexsCRegex may only be constructed with a string or a compiled RE objects	Expected N(RR%RRsRRRRtpatterntflagsR|RHRFt
sre_constantsterrortcompiledREtypeRuRRRRyRRxRRs(RRQRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
s.			


		cCs|jj||}|s6t|||j|n|j}|j}t|j}|rx|D]}||||eZdZddeededZedZdZRS(s
    Token for matching strings that are delimited by quoting characters.
    
    Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=C{None})
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})

    Example::
        qs = QuotedString('"')
        print(qs.searchString('lsjdf "This is the quote" sldjf'))
        complex_qs = QuotedString('{{', endQuoteChar='}}')
        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
        sql_qs = QuotedString('"', escQuote='""')
        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
    prints::
        [['This is the quote']]
        [['This is the "quote"']]
        [['This is the quote with "embedded" quotes']]
    c	sttj|j}|sGtjdtddtn|dkr\|}n4|j}|stjdtddtn|_	t
|_|d_|_
t
|_|_|_|_|_|rTtjtjB_dtjj	tj
d|dk	rDt|pGdf_nPd_dtjj	tj
d|dk	rt|pdf_t
j
d	krjd
djfdtt
j
d	dd
Dd7_n|r*jdtj|7_n|rhjdtj|7_tjjd_njdtjj
7_y+tjjj_j_Wn4tj k
rtjdjtddnXt!_"dj"_#t$_%t&_'dS(Ns$quoteChar cannot be the empty stringRis'endQuoteChar cannot be the empty stringis%s(?:[^%s%s]Rrs%s(?:[^%s\n\r%s]is|(?:s)|(?:c3s<|]2}dtjj| tj|fVqdS(s%s[^%s]N(R|RGtendQuoteCharRE(RR(R(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	/sit)s|(?:%s)s|(?:%s.)s(.)s)*%ss$invalid pattern (%s) passed to Regexs	Expected ((RR#RRRRRtSyntaxErrorRt	quoteCharRtquoteCharLentfirstQuoteCharRXtendQuoteCharLentescChartescQuotetunquoteResultstconvertWhitespaceEscapesR|t	MULTILINEtDOTALLRRRGRERQRRtescCharReplacePatternRHRFRSRTRRRyRRxRRs(RR[R_R`t	multilineRaRXRb((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRsf		
					(	%E
	c	CsT|||jkr(|jj||p+d}|sOt|||j|n|j}|j}|jrJ||j	|j
!}t|trJd|kr|j
ridd6dd6dd6dd	6}x/|jD]\}}|j||}qWn|jr tj|jd
|}n|jrG|j|j|j}qGqJn||fS(Ns\s	s\ts
s\nss\fs
s\rs\g<1>(R]R|R%RRRyRLRMRaR\R^RsRRbRRR_RReR`RX(	RRERRR!R}tws_maptwslittwschar((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRGs*.	
		!cCs]ytt|jSWntk
r*nX|jdkrVd|j|jf|_n|jS(Ns.quoted string, starting with %s ending with %s(RR#RRaRmRR[RX(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRjs
N(	RRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR#
sA#cBs5eZdZddddZedZdZRS(s
    Token for matching words composed of characters I{not} in a given set (will
    include whitespace in matched characters if not listed in the provided exclusion set - see example).
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.

    Example::
        # define a comma-separated-value as anything that is not a ','
        csv_value = CharsNotIn(',')
        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
    prints::
        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
    iicCstt|jt|_||_|dkr@tdn||_|dkra||_n	t	|_|dkr||_||_nt
||_d|j|_|jdk|_
t|_dS(Nisfcannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permittedis	Expected (RRRRRptnotCharsRRBRCR$RRRyRsRx(RRjRIRJRK((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs 					cCs|||jkr.t|||j|n|}|d7}|j}t||jt|}x*||kr|||kr|d7}qfW|||jkrt|||j|n||||!fS(Ni(RjRRyRIRCRRB(RRERRR5tnotcharstmaxlen((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
	cCsytt|jSWntk
r*nX|jdkryt|jdkrfd|jd |_qyd|j|_n|jS(Nis
!W:(%s...)s!W:(%s)(RRRRaRmRRRj(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
(RRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRvscBsXeZdZidd6dd6dd6dd6d	d
6Zddd
d
dZedZRS(s
    Special matching class for matching whitespace.  Normally, whitespace is ignored
    by pyparsing grammars.  This class is included when some whitespace structures
    are significant.  Define with a string containing the whitespace characters to be
    matched; default is C{" \t\r\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{L{Word}} class.
    sRss	ss
ss
sss 	
iicsttj|_jdjfdjDdjdjD_t_	dj_
|_|dkr|_n	t
_|dkr|_|_ndS(NRrc3s$|]}|jkr|VqdS(N(t
matchWhite(RR(R(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	scss|]}tj|VqdS(N(R,t	whiteStrs(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	ss	Expected i(RR,RRmRRRqRRRsRyRBRCR$(RtwsRIRJRK((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs	)				cCs|||jkr.t|||j|n|}|d7}||j}t|t|}x-||kr|||jkr|d7}qcW|||jkrt|||j|n||||!fS(Ni(RmRRyRCRIRRB(RRERRR5R6((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs

"(RRRRnRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR,s
t_PositionTokencBseZdZRS(cCs8tt|j|jj|_t|_t|_	dS(N(
RRpRR^RRRRsRRx(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs	(RRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRpscBs,eZdZdZdZedZRS(sb
    Token to advance to a specific column of input text; useful for tabular report scraping.
    cCs tt|j||_dS(N(RRRR7(Rtcolno((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCst|||jkrt|}|jrB|j||}nxE||kr||jrt|||jkr|d7}qEWn|S(Ni(R7RRuRtisspace(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs	7cCs^t||}||jkr6t||d|n||j|}|||!}||fS(NsText not in expected column(R7R(RRERRtthiscoltnewlocR}((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
(RRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs			cBs#eZdZdZedZRS(s
    Matches if current position is at the beginning of a line within the parse string
    
    Example::
    
        test = '''        AAA this line
        AAA and this line
          AAA but not this one
        B AAA and definitely not this one
        '''

        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
            print(t)
    
    Prints::
        ['AAA', ' this line']
        ['AAA', ' and this line']    

    cCs tt|jd|_dS(NsExpected start of line(RRRRy(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR&scCs;t||dkr|gfSt|||j|dS(Ni(R7RRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR*s
(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs	cBs#eZdZdZedZRS(sU
    Matches if current position is at the end of a line within the parse string
    cCs<tt|j|jtjjddd|_dS(Ns
RrsExpected end of line(RRRRR"RfRRy(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR3scCs|t|krK||dkr0|ddfSt|||j|n8|t|krk|dgfSt|||j|dS(Ns
i(RRRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR8s(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR/s	cBs#eZdZdZedZRS(sM
    Matches if current position is at the beginning of the parse string
    cCs tt|jd|_dS(NsExpected start of text(RR(RRy(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRGscCsL|dkrB||j|dkrBt|||j|qBn|gfS(Ni(RRRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRKs(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR(Cs	cBs#eZdZdZedZRS(sG
    Matches if current position is at the end of the parse string
    cCs tt|jd|_dS(NsExpected end of text(RR'RRy(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRVscCs|t|kr-t|||j|nT|t|krM|dgfS|t|kri|gfSt|||j|dS(Ni(RRRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRZs
(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR'Rs	cBs&eZdZedZedZRS(sp
    Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{} behavior of regular expressions,
    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
    the string being parsed, or at the beginning of a line.
    cCs/tt|jt||_d|_dS(NsNot at the start of a word(RR/RRt	wordCharsRy(RRu((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRlscCs^|dkrT||d|jks6|||jkrTt|||j|qTn|gfS(Nii(RuRRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqs
(RRRRTRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR/dscBs&eZdZedZedZRS(sZ
    Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{} behavior of regular expressions,
    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
    the string being parsed, or at the end of a line.
    cCs8tt|jt||_t|_d|_dS(NsNot at the end of a word(RR.RRRuRRpRy(RRu((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs	cCsvt|}|dkrl||krl|||jksN||d|jkrlt|||j|qln|gfS(Nii(RRuRRy(RRERRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs(RRRRTRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR.xscBsqeZdZedZdZdZdZdZdZ	dZ
edZgd	Zd
Z
RS(s^
    Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
    cCstt|j|t|tr4t|}nt|tr[tj|g|_	nt|t
jrt|}td|Drt
tj|}nt||_	n3yt||_	Wntk
r|g|_	nXt|_dS(Ncss|]}t|tVqdS(N(RsR(RRF((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	s(RRRRsRRRR"RitexprsRtIterabletallRRRR}(RRvR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
cCs|j|S(N(Rv(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCs|jj|d|_|S(N(RvRRRm(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs	cCsPt|_g|jD]}|j^q|_x|jD]}|jq8W|S(s~Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
           all contained expressions.(RRpRvRR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
	%cCst|trb||jkrtt|j|x(|jD]}|j|jdq>Wqn>tt|j|x%|jD]}|j|jdqW|S(Ni(RsR)RuRRRRv(RR	R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCsfytt|jSWntk
r*nX|jdkr_d|jjt|j	f|_n|jS(Ns%s:(%s)(
RRRRaRmRR^RRRv(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
%cCswtt|jx|jD]}|jqWt|jdkr`|jd}t||jr|jr|jdkr|j
r|j|jdg|_d|_|j|jO_|j
|j
O_
n|jd}t||jr`|jr`|jdkr`|j
r`|jd |j|_d|_|j|jO_|j
|j
O_
q`ndt||_|S(Niiiis	Expected (RRRRvRRsR^RkRnRRvRmRsRxRRy(RRR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs0


	


	cCstt|j||}|S(N(RRR(RRRR}((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCs@||g}x|jD]}|j|qW|jgdS(N(RvRR(RRttmpR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCs>tt|j}g|jD]}|j^q|_|S(N(RRRRv(RR}R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs%(RRRRRRRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs						
	"cBsWeZdZdefdYZedZedZdZdZ	dZ
RS(s

    Requires all given C{ParseExpression}s to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'+'} operator.
    May also be constructed using the C{'-'} operator, which will suppress backtracking.

    Example::
        integer = Word(nums)
        name_expr = OneOrMore(Word(alphas))

        expr = And([integer("id"),name_expr("name"),integer("age")])
        # more easily written as:
        expr = integer("id") + name_expr("name") + integer("age")
    RcBseZdZRS(cOs3ttj|j||d|_|jdS(Nt-(RRRRRR(RRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
s	(RRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
scCsltt|j||td|jD|_|j|jdj|jdj|_t	|_
dS(Ncss|]}|jVqdS(N(Rs(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	
si(RRRRxRvRsRRqRpRR}(RRvR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
s
c	Cs?|jdj|||dt\}}t}x|jdD]}t|tjr`t}q<n|ry|j|||\}}Wqtk
rqtk
r}d|_
tj|qtk
rt|t
||j|qXn|j|||\}}|s$|jr<||7}q<q<W||fS(NiRi(RvRRRsRRRR!RRt
__traceback__RRRRyR(	RRERRt
resultlistt	errorStopRt
exprtokensR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
s((
	
%cCs.t|tr!tj|}n|j|S(N(RsRR"RiR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR5
scCs@||g}x+|jD] }|j||jsPqqWdS(N(RvRRs(RRtsubRecCheckListR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR:
s

	cCsVt|dr|jS|jdkrOddjd|jDd|_n|jS(NRt{Rcss|]}t|VqdS(N(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	F
st}(RRRmRRRv(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRA
s
*(RRRRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs		cBsAeZdZedZedZdZdZdZ	RS(s
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the C{'^'} operator.

    Example::
        # construct Or using '^' operator
        
        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789"))
    prints::
        [['123'], ['3.1416'], ['789']]
    cCsNtt|j|||jrAtd|jD|_n	t|_dS(Ncss|]}|jVqdS(N(Rs(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	\
s(RRRRvR4RsR(RRvR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRY
s	cCsd}d}g}x|jD]}y|j||}Wntk
rw}	d|	_|	j|kr|	}|	j}qqtk
rt||krt|t||j|}t|}qqX|j	||fqW|rh|j
ddxn|D]c\}
}y|j|||SWqtk
r`}	d|	_|	j|kra|	}|	j}qaqXqWn|dk	r|j|_|nt||d|dS(NiRcSs	|dS(Ni((tx((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqu
Rrs no defined alternatives to match(
RRvRRR{RRRRyRtsortRR(RRERRt	maxExcLoctmaxExceptionRRtloc2Rt_((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR`
s<	
		cCs.t|tr!tj|}n|j|S(N(RsRR"RiR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__ixor__
scCsVt|dr|jS|jdkrOddjd|jDd|_n|jS(NRRs ^ css|]}t|VqdS(N(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	
sR(RRRmRRRv(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
s
*cCs3||g}x|jD]}|j|qWdS(N(RvR(RRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
s(
RRRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRK
s
&			cBsAeZdZedZedZdZdZdZ	RS(s
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the C{'|'} operator.

    Example::
        # construct MatchFirst using '|' operator
        
        # watch the order of expressions to match
        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]

        # put more selective expression first
        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
    cCsNtt|j|||jrAtd|jD|_n	t|_dS(Ncss|]}|jVqdS(N(Rs(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	
s(RRRRvR4RsR(RRvR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
s	c	Csd}d}x|jD]}y|j|||}|SWqtk
ro}|j|kr|}|j}qqtk
rt||krt|t||j|}t|}qqXqW|dk	r|j|_|nt||d|dS(Nis no defined alternatives to match(	RRvRRRRRRyR(	RRERRRRRR}R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
s$
	cCs.t|tr!tj|}n|j|S(N(RsRR"RiR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt__ior__
scCsVt|dr|jS|jdkrOddjd|jDd|_n|jS(NRRs | css|]}t|VqdS(N(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	
sR(RRRmRRRv(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
s
*cCs3||g}x|jD]}|j|qWdS(N(RvR(RRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
s(
RRRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR
s			cBs8eZdZedZedZdZdZRS(sm
    Requires all given C{ParseExpression}s to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'&'} operator.

    Example::
        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
        integer = Word(nums)
        shape_attr = "shape:" + shape_type("shape")
        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
        color_attr = "color:" + color("color")
        size_attr = "size:" + integer("size")

        # use Each (using operator '&') to accept attributes in any order 
        # (shape and posn are required, color and size are optional)
        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)

        shape_spec.runTests('''
            shape: SQUARE color: BLACK posn: 100, 120
            shape: CIRCLE size: 50 color: BLUE posn: 50,80
            color:GREEN size:20 shape:TRIANGLE posn:20,40
            '''
            )
    prints::
        shape: SQUARE color: BLACK posn: 100, 120
        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
        - color: BLACK
        - posn: ['100', ',', '120']
          - x: 100
          - y: 120
        - shape: SQUARE


        shape: CIRCLE size: 50 color: BLUE posn: 50,80
        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
        - color: BLUE
        - posn: ['50', ',', '80']
          - x: 50
          - y: 80
        - shape: CIRCLE
        - size: 50


        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
        - color: GREEN
        - posn: ['20', ',', '40']
          - x: 20
          - y: 40
        - shape: TRIANGLE
        - size: 20
    cCsKtt|j||td|jD|_t|_t|_dS(Ncss|]}|jVqdS(N(Rs(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	s(	RR
RRxRvRsRRptinitExprGroups(RRvR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs	cCs4|jrLtd|jD|_g|jD]}t|tr/|j^q/}g|jD]%}|jr]t|tr]|^q]}|||_g|jD]}t|t	r|j^q|_
g|jD]}t|tr|j^q|_g|jD]$}t|tt	tfs|^q|_
|j
|j7_
t|_n|}|j
}|j}	g}
t}x|r_||	|j
|j}g}
x|D]}y|j||}Wntk
r|
j|qX|
j|jjt||||kr|j|q||	kr|	j|qqWt|
t|krut}ququW|rdjd|D}t||d|n|
g|jD]*}t|tr|j|	kr|^q7}
g}x6|
D].}|j|||\}}|j|qWt|tg}||fS(Ncss3|])}t|trt|j|fVqdS(N(RsRRRF(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	ss, css|]}t|VqdS(N(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	=ss*Missing one or more required elements (%s)(RRRvtopt1mapRsRRFRst	optionalsR0tmultioptionalsRt
multirequiredtrequiredRRRRRRRtremoveRRRtsumR (RRERRRtopt1topt2ttmpLocttmpReqdttmpOptt
matchOrdertkeepMatchingttmpExprstfailedtmissingR|R;tfinalResults((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRsP	.5
117

	

"
>
cCsVt|dr|jS|jdkrOddjd|jDd|_n|jS(NRRs & css|]}t|VqdS(N(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	PsR(RRRmRRRv(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRKs
*cCs3||g}x|jD]}|j|qWdS(N(RvR(RRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRTs(RRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR

s
51		cBs_eZdZedZedZdZdZdZ	dZ
gdZdZRS(	sa
    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
    cCstt|j|t|trattjtrItj|}qatjt	|}n||_
d|_|dk	r|j
|_
|j|_|j|j|j|_|j|_|j|_|jj|jndS(N(RRRRsRt
issubclassR"RiR*RRFRRmRxRsRRqRpRoR}RuR(RRFR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR^s		cCsG|jdk	r+|jj|||dtStd||j|dS(NRRr(RFRRRRRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRpscCs>t|_|jj|_|jdk	r:|jjn|S(N(RRpRFRRR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRvs
	cCst|trc||jkrtt|j||jdk	r`|jj|jdq`qn?tt|j||jdk	r|jj|jdn|S(Ni(RsR)RuRRRRFR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR}s cCs6tt|j|jdk	r2|jjn|S(N(RRRRFR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCsV||kr"t||gn||g}|jdk	rR|jj|ndS(N(R$RFRR(RRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
cCsA||g}|jdk	r0|jj|n|jgdS(N(RFRRR(RRRy((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCsuytt|jSWntk
r*nX|jdkrn|jdk	rnd|jjt	|jf|_n|jS(Ns%s:(%s)(
RRRRaRmRRFR^RR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
%(
RRRRRRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRZs				cBs#eZdZdZedZRS(s
    Lookahead matching of the given parse expression.  C{FollowedBy}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression matches at the current
    position.  C{FollowedBy} always returns a null token list.

    Example::
        # use FollowedBy to match a label only if it is followed by a ':'
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        
        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
    prints::
        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
    cCs#tt|j|t|_dS(N(RRRRRs(RRF((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCs|jj|||gfS(N(RFR(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs	cBs,eZdZdZedZdZRS(s
    Lookahead to disallow matching with the given parse expression.  C{NotAny}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression does I{not} match at the current
    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
    always returns a null token list.  May be constructed using the '~' operator.

    Example::
        
    cCsBtt|j|t|_t|_dt|j|_	dS(NsFound unwanted token, (
RRRRRpRRsRRFRy(RRF((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs		cCs:|jj||r0t|||j|n|gfS(N(RFRRRy(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCsIt|dr|jS|jdkrBdt|jd|_n|jS(NRs~{R(RRRmRRRF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
(RRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
	t_MultipleMatchcBs eZddZedZRS(cCsftt|j|t|_|}t|trFtj|}n|dk	rY|nd|_
dS(N(RRRRRoRsRR"RiRt	not_ender(RRFtstopOntender((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs	cCs|jj}|j}|jdk	}|r9|jj}n|rO|||n||||dt\}}y|j}	xo|r|||n|	r|||}
n|}
|||
|\}}|s|jr~||7}q~q~WWnt	t
fk
rnX||fS(NR(RFRRRRRRRuRRR(RRERRtself_expr_parsetself_skip_ignorablestcheck_endert
try_not_enderRthasIgnoreExprsRt	tmptokens((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs,	N(RRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscBseZdZdZRS(s
    Repetition of one or more of the given expression.
    
    Parameters:
     - expr - expression that must match one or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition 
          expression)          

    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: BLACK"
        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]

        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
        
        # could also be written as
        (attr_expr * (1,)).parseString(text).pprint()
    cCsIt|dr|jS|jdkrBdt|jd|_n|jS(NRRs}...(RRRmRRRF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR!s
(RRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscBs/eZdZddZedZdZRS(sw
    Optional repetition of zero or more of the given expression.
    
    Parameters:
     - expr - expression that must match zero or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition 
          expression)          

    Example: similar to L{OneOrMore}
    cCs)tt|j|d|t|_dS(NR(RR0RRRs(RRFR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR6scCsEy tt|j|||SWnttfk
r@|gfSXdS(N(RR0RRR(RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR:s cCsIt|dr|jS|jdkrBdt|jd|_n|jS(NRRs]...(RRRmRRRF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR@s
N(RRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR0*st
_NullTokencBs eZdZeZdZRS(cCstS(N(R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRJscCsdS(NRr((R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRMs(RRRR>R(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRIs	cBs/eZdZedZedZdZRS(sa
    Optional matching of the given expression.

    Parameters:
     - expr - expression that must match zero or more times
     - default (optional) - value to be returned if the optional expression is not found.

    Example::
        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
        zip.runTests('''
            # traditional ZIP code
            12345
            
            # ZIP+4 form
            12101-0001
            
            # invalid ZIP
            98765-
            ''')
    prints::
        # traditional ZIP code
        12345
        ['12345']

        # ZIP+4 form
        12101-0001
        ['12101-0001']

        # invalid ZIP
        98765-
             ^
        FAIL: Expected end of text (at char 5), (line:1, col:6)
    cCsAtt|j|dt|jj|_||_t|_dS(NR(	RRRRRFRoRRRs(RRFR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRts	cCsy(|jj|||dt\}}Wnottfk
r|jtk	r|jjrt|jg}|j||jj ['3', '.', '1416']
        # will also erroneously match the following
        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']

        real = Combine(Word(nums) + '.' + Word(nums))
        print(real.parseString('3.1416')) # -> ['3.1416']
        # no match when there are internal spaces
        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
    RrcCsQtt|j||r)|jn||_t|_||_t|_dS(N(	RRRRtadjacentRRpt
joinStringR}(RRFRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRrs
			cCs6|jrtj||ntt|j||S(N(RR"RRR(RR	((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR|s	cCse|j}|2|tdj|j|jgd|j7}|jr]|jr]|gS|SdS(NRrR(RR RRRRzRnR(RRERRtretToks((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs1(RRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRas
	cBs eZdZdZdZRS(s
    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.

    Example::
        ident = Word(alphas)
        num = Word(nums)
        term = ident | num
        func = ident + Optional(delimitedList(term))
        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']

        func = ident + Group(Optional(delimitedList(term)))
        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
    cCs#tt|j|t|_dS(N(RRRRRo(RRF((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCs|gS(N((RRERR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs(RRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRs
	cBs eZdZdZdZRS(sW
    Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as a item key.

    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        
        # print attributes as plain groups
        print(OneOrMore(attr_expr).parseString(text).dump())
        
        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
        print(result.dump())
        
        # access named fields as dict entries, or output as dict
        print(result['shape'])        
        print(result.asDict())
    prints::
        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']

        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
    See more examples at L{ParseResults} of accessing fields by results name.
    cCs#tt|j|t|_dS(N(RR	RRRo(RRF((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRscCsTx9t|D]+\}}t|dkr1q
n|d}t|trct|dj}nt|dkrtd|||nX|S(ss
    Decorator for debugging parse actions. 
    
    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.

    Example::
        wd = Word(alphas)

        @traceParseAction
        def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens)))

        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
    prints::
        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
        <>entering %s(line: '%s', %d, %r)
s< ['aa', 'bb', 'cc']
        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
    s [Rs]...N(RRR0RR)(RFtdelimtcombinetdlName((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR>9s
,!cstfd}|dkrBttjd}n|j}|jd|j|dt|jdt	dS(s:
    Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    
    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.

    Example::
        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']

        # in this parser, the leading integer value is given in binary,
        # '10' indicating that 2 values are in the array
        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
    cs;|d}|r,ttg|p5tt>gS(Ni(RRRA(RRNRpR(t	arrayExprRF(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytcountFieldParseAction_s
-cSst|dS(Ni(Ro(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqdRrtarrayLenR~s(len) s...N(
R
RR-RPRzRRRRR(RFtintExprR((RRFs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR:Ls	
cCsMg}x@|D]8}t|tr8|jt|q
|j|q
W|S(N(RsRRRR(tLR}R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRks
csFtfd}|j|dtjdt|S(s*
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
    If this is not desired, use C{matchPreviousExpr}.
    Do I{not} use with packrat parsing enabled.
    csc|rTt|dkr'|d>q_t|j}td|D>nt>dS(Niicss|]}t|VqdS(N(R(Rttt((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	s(RRRRR(RRNRpttflat(trep(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytcopyTokenToRepeatersR~s(prev) (R
RRRR(RFR((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRMts

	
cs\t|j}|Kfd}|j|dtjdt|S(sS
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
    expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do I{not} use with packrat parsing enabled.
    cs8t|jfd}j|dtdS(Ncs7t|j}|kr3tdddndS(NRri(RRR(RRNRpttheseTokens(tmatchTokens(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytmustMatchTheseTokenssR~(RRRzR(RRNRpR(R(Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRsR~s(prev) (R
RRRRR(RFte2R((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRLs	
cCsUx$dD]}|j|t|}qW|jdd}|jdd}t|S(Ns\^-]s
s\ns	s\t(Rt_bslashR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyREs

c
sD|r!d}d}tnd}d}tg}t|tr]|j}n7t|tjr~t|}ntj	dt
dd|stSd}x|t|d	krV||}xt
||d	D]f\}}	||	|r
|||d	=Pq|||	r|||d	=|j||	|	}PqqW|d	7}qW|r|ryt|td
j|krtdd
jd|Djd
j|Stdjd|Djd
j|SWqtk
rtj	dt
ddqXntfd|Djd
j|S(s
    Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a C{L{MatchFirst}} for best performance.

    Parameters:
     - strs - a string of space-delimited literals, or a collection of string literals
     - caseless - (default=C{False}) - treat all literals as caseless
     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
          if creating a C{Regex} raises an exception)

    Example::
        comp_oper = oneOf("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
    prints::
        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    cSs|j|jkS(N(R,(Rtb((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrcSs|jj|jS(N(R,R)(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrcSs
||kS(N((RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrcSs
|j|S(N(R)(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrs6Invalid argument to oneOf, expected string or iterableRiiiRrs[%s]css|]}t|VqdS(N(RE(Rtsym((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	ss | t|css|]}tj|VqdS(N(R|RG(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	ss7Exception creating Regex for oneOf, building MatchFirstc3s|]}|VqdS(N((RR(tparseElementClass(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	s(RRRsRRRRwRRRRRRRRRR%RRaR(
tstrsR+tuseRegextisequaltmaskstsymbolsRtcurRR	((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRQsL						

!
!33
	cCsttt||S(s
    Helper to easily and clearly define a dictionary by specifying the respective patterns
    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
    in the proper order.  The key pattern can include delimiting markers or punctuation,
    as long as they are suppressed, thereby leaving the significant key text.  The value
    pattern can include named results, so that the C{Dict} results can include named token
    fields.

    Example::
        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        print(OneOrMore(attr_expr).parseString(text).dump())
        
        attr_label = label
        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)

        # similar to Dict, but simpler call format
        result = dictOf(attr_label, attr_value).parseString(text)
        print(result.dump())
        print(result['shape'])
        print(result.shape)  # object attribute access works too
        print(result.asDict())
    prints::
        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        SQUARE
        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
    (R	R0R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR?s!cCs|tjd}|j}t|_|d||d}|rVd}n	d}|j||j|_|S(s
    Helper to return the original, untokenized text for a given expression.  Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns astring containing the original parsed text.  
       
    If the optional C{asString} argument is passed as C{False}, then the return value is a 
    C{L{ParseResults}} containing any results names that were originally matched, and a 
    single token containing the original matched text from the input string.  So if 
    the expression passed to C{L{originalTextFor}} contains expressions with defined
    results names, you must set C{asString} to C{False} if you want to preserve those
    results name values.

    Example::
        src = "this is test  bold text  normal text "
        for tag in ("b","i"):
            opener,closer = makeHTMLTags(tag)
            patt = originalTextFor(opener + SkipTo(closer) + closer)
            print(patt.searchString(src)[0])
    prints::
        [' bold text ']
        ['text']
    cSs|S(N((RRRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRq8Rrt_original_startt
_original_endcSs||j|j!S(N(RR(RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRq=RrcSs'||jd|jd!g|(dS(NRR(R(RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytextractText?s(RRzRRR}Ru(RFtasStringt	locMarkertendlocMarkert	matchExprR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRe s		
cCst|jdS(sp
    Helper to undo pyparsing's default grouping of And expressions, even
    if all but one are non-empty.
    cSs|dS(Ni((Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqJRr(R+Rz(RF((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRfEscCsEtjd}t|d|d|jjdS(s
    Helper to decorate a returned token with its starting and ending locations in the input string.
    This helper adds the following results names:
     - locn_start = location where matched expression begins
     - locn_end = location where matched expression ends
     - value = the actual parsed results

    Be careful if the input text contains C{} characters, you may want to call
    C{L{ParserElement.parseWithTabs}}

    Example::
        wd = Word(alphas)
        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
            print(match)
    prints::
        [[0, 'ljsdf', 5]]
        [[8, 'lksdjjf', 15]]
        [[18, 'lkkjj', 23]]
    cSs|S(N((RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRq`Rrt
locn_startRtlocn_end(RRzRRR(RFtlocator((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRhLss\[]-*.$+^?()~ RKcCs|ddS(Nii((RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqkRrs\\0?[xX][0-9a-fA-F]+cCs tt|djddS(Nis\0xi(tunichrRotlstrip(RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqlRrs	\\0[0-7]+cCstt|dddS(Niii(RRo(RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqmRrR<s\]s\wRzRRtnegatetbodyRcsOdy-djfdtj|jDSWntk
rJdSXdS(s
    Helper to easily define string ranges for use in Word construction.  Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]")   -> "0123456789"
        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be:
     - a single character
     - an escaped character with a leading backslash (such as C{\-} or C{\]})
     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 
         (C{\0x##} is also supported for backwards compatibility) 
     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
    cSsKt|ts|Sdjdtt|dt|ddDS(NRrcss|]}t|VqdS(N(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	sii(RsR RRtord(tp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrRrc3s|]}|VqdS(N((Rtpart(t	_expanded(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	sN(Rt_reBracketExprRRRa(R((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyR]rs
	-
csfd}|S(st
    Helper method for defining parse actions that require matching at a specific
    column in the input text.
    cs2t||kr.t||dndS(Nsmatched token not at column %d(R7R(R@tlocnRJ(R(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt	verifyCols((RR((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRKscs
fdS(s
    Helper method for common parse actions that simply return a literal value.  Especially
    useful when used with C{L{transformString}()}.

    Example::
        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
        term = na | num
        
        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
    csgS(N((RRNRp(treplStr(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRr((R((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRZscCs|ddd!S(s
    Helper parse action for removing quotation marks from parsed quoted strings.

    Example::
        # by default, quotation marks are included in parsed results
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]

        # use removeQuotes to strip quotation marks from parsed results
        quotedString.setParseAction(removeQuotes)
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
    iii((RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRXscsafd}y"tdtdj}Wntk
rSt}nX||_|S(sG
    Helper to define a parse action by mapping a function to all elements of a ParseResults list.If any additional 
    args are passed, they are forwarded to the given function as additional arguments after
    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
    parsed data to an integer using base 16.

    Example (compare the last to example in L{ParserElement.transformString}::
        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
        hex_ints.runTests('''
            00 11 22 aa FF 0a 0d 1a
            ''')
        
        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
        OneOrMore(upperword).runTests('''
            my kingdom for a horse
            ''')

        wd = Word(alphas).setParseAction(tokenMap(str.title))
        OneOrMore(wd).setParseAction(' '.join).runTests('''
            now is the winter of our discontent made glorious summer by this sun of york
            ''')
    prints::
        00 11 22 aa FF 0a 0d 1a
        [0, 17, 34, 170, 255, 10, 13, 26]

        my kingdom for a horse
        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']

        now is the winter of our discontent made glorious summer by this sun of york
        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
    cs g|D]}|^qS(N((RRNRpttokn(RRO(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRsRR^(R`RRaRu(RORRRd((RROs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRks 	
	cCst|jS(N(RR,(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrcCst|jS(N(Rtlower(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrcCs<t|tr+|}t|d|}n	|j}tttd}|rtjj	t
}td|dtt
t|td|tddtgjdj	d	td
}ndjdtD}tjj	t
t|B}td|dtt
t|j	tttd|tddtgjdj	d
td
}ttd|d
}|jddj|jddjjjd|}|jddj|jddjjjd|}||_||_||fS(sRInternal helper to construct opening and closing tag expressions, given a tag nameR+s_-:Rttagt=t/RRAcSs|ddkS(NiR((RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrR Rrcss!|]}|dkr|VqdS(R N((RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys	scSs|ddkS(NiR((RRNRp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrsRLs(RsRRRR-R2R1R<RRzRXR)R	R0RRRRRRTRWR@Rt_LRttitleRRR(ttagStrtxmltresnamettagAttrNamettagAttrValuetopenTagtprintablesLessRAbracktcloseTag((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt	_makeTagss"	o{AA		cCs
t|tS(s 
    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.

    Example::
        text = 'More info at the pyparsing wiki page'
        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
        a,a_end = makeHTMLTags("A")
        link_expr = a + SkipTo(a_end)("link_text") + a_end
        
        for link in link_expr.searchString(text):
            # attributes in the  tag (like "href" shown here) are also accessible as named results
            print(link.link_text, '->', link.href)
    prints::
        pyparsing -> http://pyparsing.wikispaces.com
    (RR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRIscCs
t|tS(s
    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
    tags only in the given upper/lower case.

    Example: similar to L{makeHTMLTags}
    (RR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRJscsT|r|n|jgD]\}}||f^q#fd}|S(s<
    Helper to create a validating parse action to be used with start tags created
    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
    with a required attribute value, to avoid false matches on common tags such as
    C{} or C{
}. Call C{withAttribute} with a series of attribute names and values. Specify the list of filter attributes names and values as: - keyword arguments, as in C{(align="right")}, or - as an explicit dict with C{**} operator, when an attribute name is also a Python reserved word, as in C{**{"class":"Customer", "align":"right"}} - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) For attribute names with a namespace prefix, you must use the second form. Attribute names are matched insensitive to upper/lower case. If just testing for C{class} (with or without a namespace), use C{L{withClass}}. To verify that the attribute exists, but without specifying a value, pass C{withAttribute.ANY_VALUE} as the value. Example:: html = '''
Some text
1 4 0 1 0
1,3 2,3 1,1
this has no type
''' div,div_end = makeHTMLTags("div") # only match div tag having a type attribute with value "grid" div_grid = div().setParseAction(withAttribute(type="grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) # construct a match with any div tag having a type attribute, regardless of the value div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 csx~D]v\}}||kr8t||d|n|tjkr|||krt||d||||fqqWdS(Nsno matching attribute s+attribute '%s' has value '%s', must be '%s'(RRct ANY_VALUE(RRNRtattrNamet attrValue(tattrs(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRRs   (R(RtattrDictRRR((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRcs 2  %cCs'|rd|nd}ti||6S(s Simplified version of C{L{withAttribute}} when matching on a div class - made difficult because C{class} is a reserved word in Python. Example:: html = '''
Some text
1 4 0 1 0
1,3 2,3 1,1
this <div> has no class
''' div,div_end = makeHTMLTags("div") div_grid = div().setParseAction(withClass("grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 s%s:classtclass(Rc(t classnamet namespacet classattr((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRi\s t(RYcCs<t}||||B}xt|D]\}}|d d \}} } } | dkrdd|nd|} | dkr|d kst|dkrtdn|\} }ntj| }| tjkr| dkr t||t |t |}q| dkrx|d k rQt|||t |t ||}qt||t |t |}q| dkrt|| |||t || |||}qtdn+| tj kr| dkr)t |t st |}nt|j|t ||}q| dkr|d k rpt|||t |t ||}qt||t |t |}q| dkrt|| |||t || |||}qtdn td | r |j| n||j| |BK}|}q(W||K}|S( s Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. Parse actions can also be attached to operator expressions. The generated parser will also recognize the use of parentheses to override operator precedences (see example below). Note: if you define a deep operator list, you may see performance issues when using infixNotation. See L{ParserElement.enablePackrat} for a mechanism to potentially improve your parser performance. 
Parameters: - baseExpr - expression representing the most basic element for the nested - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal; if numTerms is 3, opExpr is a tuple of two expressions, for the two operators separating the 3 terms - numTerms is the number of terms for this operator (must be 1, 2, or 3) - rightLeftAssoc is the indicator whether the operator is right or left associative, using the pyparsing-defined constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. - parseAction is the parse action to be associated with expressions matching this operator expression (the parse action tuple member may be omitted) - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) Example:: # simple example of four-function arithmetic with ints and variable names integer = pyparsing_common.signed_integer varname = pyparsing_common.identifier arith_expr = infixNotation(integer | varname, [ ('-', 1, opAssoc.RIGHT), (oneOf('* /'), 2, opAssoc.LEFT), (oneOf('+ -'), 2, opAssoc.LEFT), ]) arith_expr.runTests(''' 5+3*6 (5+3)*6 -2--11 ''', fullDump=False) prints:: 5+3*6 [[5, '+', [3, '*', 6]]] (5+3)*6 [[[5, '+', 3], '*', 6]] -2--11 [[['-', 2], '-', ['-', 11]]] iis%s terms %s%s termis@if numterms=3, opExpr must be a tuple or list of two expressionsis6operator must be unary (1), binary (2), or ternary (3)s2operator must indicate right or left associativityN(N(R RRRRRRRtLEFTR RRtRIGHTRsRRFRz(tbaseExprtopListtlpartrparR}tlastExprRtoperDeftopExprtaritytrightLeftAssocRttermNametopExpr1topExpr2tthisExprR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRgsR;    '  /'   $  /'     s4"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*t"s string enclosed 
in double quotess4'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*t's string enclosed in single quotess*quotedString using single or double quotestusunicode string literalcCs!||krtdn|d krt|trt|trt|dkrt|dkr|d k rtt|t||tj ddj d}q|t j t||tj j d}q|d k r9tt|t |t |ttj ddj d}qttt |t |ttj ddj d}qtdnt}|d k r|tt|t||B|Bt|K}n.|tt|t||Bt|K}|jd ||f|S( s~ Helper method for defining nested lists enclosed in opening and closing delimiters ("(" and ")" are the default). Parameters: - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression - content - expression for items within the nested lists (default=C{None}) - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) If an expression is not provided for the content argument, the nested expression will capture all whitespace-delimited content between delimiters as a list of separate values. Use the C{ignoreExpr} argument to define expressions that may contain opening or closing characters that should not be treated as opening or closing characters for nesting, such as quotedString or a comment expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. The default is L{quotedString}, but if no expressions are to be ignored, then pass C{None} for this argument. 
Example:: data_type = oneOf("void int short long char float double") decl_data_type = Combine(data_type + Optional(Word('*'))) ident = Word(alphas+'_', alphanums+'_') number = pyparsing_common.number arg = Group(decl_data_type + ident) LPAR,RPAR = map(Suppress, "()") code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) c_function = (decl_data_type("type") + ident("name") + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + code_body("body")) c_function.ignore(cStyleComment) source_code = ''' int is_odd(int x) { return (x%2); } int dec_to_hex(char hchar) { if (hchar >= '0' && hchar <= '9') { return (ord(hchar)-ord('0')); } else { return (10+ord(hchar)-ord('A')); } } ''' for func in c_function.searchString(source_code): print("%(name)s (%(type)s) args: %(args)s" % func) prints:: is_odd (int) args: [['int', 'x']] dec_to_hex (int) args: [['char', 'hchar']] s.opening and closing strings cannot be the sameiRKcSs|djS(Ni(R(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRq9RrcSs|djS(Ni(R(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRq<RrcSs|djS(Ni(R(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqBRrcSs|djS(Ni(R(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqFRrsOopening and closing arguments must be strings if no content expression is givensnested %s%s expressionN(RRRsRRRRRR"RfRzRARRR RR)R0R(topenertclosertcontentRR}((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRNs4:  $  $    5.c s5fd}fd}fd}ttjdj}ttj|jd}tj|jd}tj|jd} |rtt||t|t|t|| } n0tt|t|t|t|} |j t t| jdS( s Helper method for defining space-delimited indentation blocks, such as those used to define block statements in Python source code. 
Parameters: - blockStatementExpr - expression defining syntax of statement that is repeated within the indented block - indentStack - list created by caller to manage indentation stack (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - indent - boolean indicating whether block must be indented beyond the the current level; set to False for block of left-most statements (default=C{True}) A valid block must contain at least one C{blockStatement}. Example:: data = ''' def A(z): A1 B = 100 G = A2 A2 A3 B def BB(a,b,c): BB1 def BBA(): bba1 bba2 bba3 C D def spam(x,y): def eggs(z): pass ''' indentStack = [1] stmt = Forward() identifier = Word(alphas, alphanums) funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") func_body = indentedBlock(stmt, indentStack) funcDef = Group( funcDecl + func_body ) rvalue = Forward() funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") rvalue << (funcCall | identifier | Word(nums)) assignment = Group(identifier + "=" + rvalue) stmt << ( funcDef | assignment | identifier ) module_body = OneOrMore(stmt) parseTree = module_body.parseString(data) parseTree.pprint() prints:: [['def', 'A', ['(', 'z', ')'], ':', [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], 'B', ['def', 'BB', ['(', 'a', 'b', 'c', ')'], ':', [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], 'C', 'D', ['def', 'spam', ['(', 'x', 'y', ')'], ':', [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] css|t|krdSt||}|dkro|dkrZt||dnt||dndS(Nisillegal nestingsnot a peer entry(RR7RR(RRNRptcurCol(t indentStack(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytcheckPeerIndentscsEt||}|dkr/j|nt||ddS(Nisnot a subentry(R7RR(RRNRpR+(R,(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytcheckSubIndentscsn|t|krdSt||}oH|dkoH|dks`t||dnjdS(Niisnot an 
unindent(RR7RR(RRNRpR+(R,(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt checkUnindents &s tINDENTRrtUNINDENTsindented block( RRRRR RzRRRRR( tblockStatementExprR,R$R-R.R/R7R0tPEERtUNDENTtsmExpr((R,s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRdQsN"8 $s#[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]s[\0xa1-\0xbf\0xd7\0xf7]s_:sany tagsgt lt amp nbsp quot aposs><& "'s &(?PRs);scommon HTML entitycCstj|jS(sRHelper parser action to replace common HTML entities with their special characters(t_htmlEntityMapRtentity(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRYss/\*(?:[^*]|\*(?!/))*s*/sC style commentss HTML comments.*s rest of lines//(?:\\\n|[^\n])*s // commentsC++ style comments#.*sPython style comments t commaItemRcBseZdZeeZeeZee j dj eZ ee j dj eedZedj dj eZej edej ej dZejdeeeed jeBj d Zejeed j d j eZed j dj eZeeBeBjZedj dj eZeededj dZedj dZedj dZ e de dj dZ!ee de d8dee de d9j dZ"e"j#ddej d Z$e%e!e$Be"Bj d!j d!Z&ed"j d#Z'e(d$d%Z)e(d&d'Z*ed(j d)Z+ed*j d+Z,ed,j d-Z-e.je/jBZ0e(d.Z1e%e2e3d/e4ee5d0d/ee6d1jj d2Z7e8ee9j:e7Bd3d4j d5Z;e(ed6Z<e(ed7Z=RS(:s Here are some common low-level expressions that may be useful in jump-starting parser development: - numeric forms (L{integers}, L{reals}, L{scientific notation}) - common L{programming identifiers} - network addresses (L{MAC}, L{IPv4}, L{IPv6}) - ISO8601 L{dates} and L{datetime} - L{UUID} - L{comma-separated list} Parse actions: - C{L{convertToInteger}} - C{L{convertToFloat}} - C{L{convertToDate}} - C{L{convertToDatetime}} - C{L{stripHTMLTags}} - C{L{upcaseTokens}} - C{L{downcaseTokens}} Example:: pyparsing_common.number.runTests(''' # any int or real number, returned as the appropriate type 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.fnumber.runTests(''' # any int or real number, returned as float 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.hex_integer.runTests(''' # hex numbers 100 FF ''') pyparsing_common.fraction.runTests(''' # fractions 1/2 -3/4 ''') 
pyparsing_common.mixed_integer.runTests(''' # mixed fractions 1 1/2 -3/4 1-3/4 ''') import uuid pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) pyparsing_common.uuid.runTests(''' # uuid 12345678-1234-5678-1234-567812345678 ''') prints:: # any int or real number, returned as the appropriate type 100 [100] -100 [-100] +100 [100] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # any int or real number, returned as float 100 [100.0] -100 [-100.0] +100 [100.0] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # hex numbers 100 [256] FF [255] # fractions 1/2 [0.5] -3/4 [-0.75] # mixed fractions 1 [1] 1/2 [0.5] -3/4 [-0.75] 1-3/4 [1.75] # uuid 12345678-1234-5678-1234-567812345678 [UUID('12345678-1234-5678-1234-567812345678')] tintegers hex integeris[+-]?\d+ssigned integerRtfractioncCs|d|dS(Nii((Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrRzs"fraction or mixed integer-fractions [+-]?\d+\.\d*s real numbers+[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)s$real number with scientific notations[+-]?\d+\.?\d*([eE][+-]?\d+)?tfnumberRt identifiersK(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}s IPv4 addresss[0-9a-fA-F]{1,4}t hex_integerRisfull IPv6 addressiis::sshort IPv6 addresscCstd|DdkS(Ncss'|]}tjj|rdVqdS(iN(Rlt _ipv6_partR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pys si(R(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrs::ffff:smixed IPv6 addresss IPv6 addresss:[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}s MAC addresss%Y-%m-%dcsfd}|S(s Helper to create a parse action for converting parsed date string to Python datetime.date Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) Example:: date_expr = pyparsing_common.iso8601_date.copy() date_expr.setParseAction(pyparsing_common.convertToDate()) print(date_expr.parseString("1999-12-31")) prints:: [datetime.date(1999, 12, 31)] csPytj|djSWn+tk 
rK}t||t|nXdS(Ni(RtstrptimetdateRRRu(RRNRptve(tfmt(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytcvt_fns((RBRC((RBs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt convertToDatess%Y-%m-%dT%H:%M:%S.%fcsfd}|S(s Helper to create a parse action for converting parsed datetime string to Python datetime.datetime Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) Example:: dt_expr = pyparsing_common.iso8601_datetime.copy() dt_expr.setParseAction(pyparsing_common.convertToDatetime()) print(dt_expr.parseString("1999-12-31T23:59:59.999")) prints:: [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] csJytj|dSWn+tk rE}t||t|nXdS(Ni(RR?RRRu(RRNRpRA(RB(s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRCs((RBRC((RBs9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pytconvertToDatetimess7(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?s ISO8601 dates(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?sISO8601 datetimes2[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}tUUIDcCstjj|dS(s Parse action to remove HTML tags from web page HTML source Example:: # strip HTML links from normal text text = 'More info at the
pyparsing wiki page' td,td_end = makeHTMLTags("TD") table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' i(Rlt_html_stripperR{(RRNR((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt stripHTMLTagss RR<s R8RRrscomma separated listcCst|jS(N(RR,(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRrcCst|jS(N(RR(Rp((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRqRr(ii(ii(>RRRRkRotconvertToIntegertfloattconvertToFloatR-RPRRzR9RBR=R%tsigned_integerR:RRRt mixed_integerRtrealtsci_realRtnumberR;R2R1R<t ipv4_addressR>t_full_ipv6_addresst_short_ipv6_addressRt_mixed_ipv6_addressRt ipv6_addresst mac_addressR#RDREt iso8601_datetiso8601_datetimetuuidR5R4RGRHRRRRTR,t _commasepitemR>RWRtcomma_separated_listRbR@(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyRlsL  '/-  ;&J+t__main__tselecttfroms_$RRtcolumnsRttablestcommandsK # '*' as column list and dotted table name select * from SYS.XYZZY # caseless match on "SELECT", and casts back to "select" SELECT * from XYZZY, ABC # list of column names, and mixed case SELECT keyword Select AA,BB,CC from Sys.dual # multiple tables Select A, B, C from Sys.dual, Table2 # invalid SELECT keyword - should fail Xelect A, B, C from Sys.dual # incomplete command - should fail Select # invalid column name - should fail Select ^^^ frox Sys.dual s] 100 -100 +100 3.14159 6.02e23 1e-12 s 100 FF s6 12345678-1234-5678-1234-567812345678 (Rt __version__t__versionTime__t __author__RtweakrefRRRRxRR|RSRR8RRRRt_threadRt ImportErrort threadingRRt ordereddictRt__all__Rt version_infoRQRtmaxsizeR$RuRtchrRRRRR2treversedRRR4RxRIRJR_tmaxinttxrangeRt __builtin__RtfnameRR`RRRRRRtascii_uppercasetascii_lowercaseR2RPRBR1RRt printableRTRaRRRR!R$RR tMutableMappingtregisterR7RHRERGRKRMROReR"R*R RRRRiRRRRjR-R%R#RR,RpRRRR(R'R/R.RRRRR RR RRRR0RRRR&R RR+RRR R)RR`RR>R:RRMRLRERRQR?ReRfRhRRARGRFR_R^Rzt 
_escapedPunct_escapedHexChart_escapedOctChartUNICODEt _singleChart _charRangeRRR]RKRZRXRkRbR@R RIRJRcR RiRRRRRgRSR<R\RWRaRNRdR3RUR5R4RRR6RR9RYR6RCRR[R=R;RDRVRRZR8RlRt selectTokent fromTokentidentt columnNametcolumnNameListt columnSpect tableNamet tableNameListt simpleSQLR"RPR;R=RYRF(((s9/usr/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyt=s              *         8      @ & A=IG3pLOD|M &# @sQ,A ,    I # %  !4@    ,   ?  #   k%Z r  (, #8+    $     PKe[nڲFyFysix.pycnu[ abcA@@sKdZddlmZddlZddlZddlZddlZddlZdZdZ ej ddkZ ej ddkZ ej dd!dakZ e refZefZefZeZeZejZnefZeefZeejfZeZeZejjd r$edcZnVd efd YZ ye!e Wne"k rjedeZn XedgZ[ dZ#dZ$defdYZ%de%fdYZ&dej'fdYZ(de%fdYZ)defdYZ*e*e+Z,de(fdYZ-e)dddde)d d!d"d#d e)d$d!d!d%d$e)d&d'd"d(d&e)d)d'd*e)d+d!d"d,d+e)d-d.d.d/d-e)d0d.d.d-d0e)d1d'd"d2d1e)d3d'e rd4nd5d6e)d7d'd8e)d9d:d;d<e)ddde)d=d=d>e)d?d?d>e)d@d@d>e)d2d'd"d2d1e)dAd!d"dBdAe)dCd!d!dDdCe&d"d'e&dEdFe&dGdHe&dIdJdKe&dLdMdLe&dNdOdPe&dQdRdSe&dTdUdVe&dWdXdYe&dZd[d\e&d]d^d_e&d`dadbe&dcdddee&dfdgdhe&dididje&dkdkdje&dldldje&dmdmdne&dodpe&dqdre&dsdte&dudvdue&dwdxe&dydzd{e&d|d}d~e&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddde&ddd~e&ddde&ddde&ddde&de+dde&de+dde&de+de+de&ddde&ddde&dddg>Z.ejdkr;e.e&ddg7Z.nxJe.D]BZ/e0e-e/j1e/e2e/e&rBe,j3e/de/j1qBqBW[/e.e-_.e-e+dZ4e,j3e4dde(fdYZ5e)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)d<dde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)dddgZ6x!e6D]Z/e0e5e/j1e/q0W[/e6e5_.e,j3e5e+dddde(fdYZ7e)ddde)ddde)dddgZ8x!e8D]Z/e0e7e/j1e/qW[/e8e7_.e,j3e7e+dddde(fdYZ9e)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)ddde)dddg!Z:x!e:D]Z/e0e9e/j1e/q W[/e:e9_.e,j3e9e+dddde(fdYZ;e)ddde)ddde)ddde)dddgZ<x!e<D]Z/e0e;e/j1e/q W[/e<e;_.e,j3e;e+d d d d e(fd YZ=e)dddgZ>x!e>D]Z/e0e=e/j1e/q; W[/e>e=_.e,j3e=e+ddddej'fdYZ?e,j3e?e+dddZ@dZAe r dZBdZCdZDdZEdZFdZGn$dZBdZCdZDd ZEd!ZFd"ZGy eHZIWneJk r= d#ZInXeIZHy eKZKWneJk rj d$ZKnXe r 
d%ZLejMZNd&ZOeZPn7d'ZLd(ZNd)ZOd*efd+YZPeKZKe#eLd,ejQeBZRejQeCZSejQeDZTejQeEZUejQeFZVejQeGZWe rd-ZXd.ZYd/ZZd0Z[ej\d1Z]ej\d2Z^ej\d3Z_nQd4ZXd5ZYd6ZZd7Z[ej\d8Z]ej\d9Z^ej\d:Z_e#eXd;e#eYd<e#eZd=e#e[d>e rd?Z`d@ZaebZcddldZdedjedAjfZg[dejhdZiejjZkelZmddlnZnenjoZoenjpZpdBZqej d d krdCZrdDZsq4dEZrdFZsnpdGZ`dHZaecZcebZgdIZidJZkejtejuevZmddloZoeojoZoZpdKZqdCZrdDZse#e`dLe#eadMdNZwdOZxdPZye reze4j{dQZ|ddRZ~ndddSZ|e|dTej d dhkre|dUn)ej d dikre|dVn dWZeze4j{dXdZedkrdYZnej d djkrDeZdZZne#e~d[ej dd!dkkrejejd\Zn ejZd]Zd^Zd_ZgZe+Zejd`dk rge_nejr7xOeejD]>\ZZeej+dkrej1e+kreje=PqqW[[nejje,dS(ls6Utilities for writing code that runs on Python 2 and 3i(tabsolute_importNs'Benjamin Peterson s1.10.0iiitjavaiitXcB@seZdZRS(cC@sdS(NiiI((tself((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt__len__>s(t__name__t __module__R(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR<si?cC@s ||_dS(s Add documentation to a function.N(t__doc__(tfunctdoc((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt_add_docKscC@st|tj|S(s7Import module, returning the module after the last dot.(t __import__tsystmodules(tname((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt_import_modulePs t _LazyDescrcB@seZdZdZRS(cC@s ||_dS(N(R(RR((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt__init__XscC@sN|j}t||j|yt|j|jWntk rInX|S(N(t_resolvetsetattrRtdelattrt __class__tAttributeError(Rtobjttptresult((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt__get__[s  (RRRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRVs t MovedModulecB@s&eZddZdZdZRS(cC@sJtt|j|tr=|dkr1|}n||_n ||_dS(N(tsuperRRtPY3tNonetmod(RRtoldtnew((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRis    cC@s t|jS(N(RR(R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRrscC@s/|j}t||}t||||S(N(RtgetattrR(Rtattrt_moduletvalue((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt __getattr__us N(RRRRRR&(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRgs t _LazyModulecB@s 
eZdZdZgZRS(cC@s)tt|j||jj|_dS(N(RR'RRR(RR((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR~scC@s3ddg}|g|jD]}|j^q7}|S(NRR(t_moved_attributesR(RtattrsR#((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt__dir__s #(RRRR*R((((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR'|s  tMovedAttributecB@s eZdddZdZRS(cC@stt|j|trp|dkr1|}n||_|dkrd|dkr[|}qd|}n||_n'||_|dkr|}n||_dS(N(RR+RRRRR#(RRtold_modtnew_modtold_attrtnew_attr((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRs           cC@st|j}t||jS(N(RRR"R#(Rtmodule((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRsN(RRRRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR+st_SixMetaPathImportercB@s_eZdZdZdZdZd dZdZdZ dZ dZ e Z RS( s A meta path importer to import six.moves and its submodules. This class implements a PEP302 finder and loader. It should be compatible with Python 2.5 and all existing versions of Python3 cC@s||_i|_dS(N(Rt known_modules(Rtsix_module_name((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRs cG@s-x&|D]}||j|jd|(RR6((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt is_packagescC@s|j|dS(s;Return None Required, if is_package is implementedN(R>R(RR6((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytget_codes N( RRRRR7R8RR:R>RARDREt get_source(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR1s       t _MovedItemscB@seZdZgZRS(sLazy loading of moved objects(RRRRB(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRGst cStringIOtiotStringIOtfiltert itertoolstbuiltinstifiltert filterfalset ifilterfalsetinputt __builtin__t raw_inputtinternR tmaptimaptgetcwdtostgetcwdutgetcwdbtrangetxranget reload_modulet importlibtimptreloadtreducet functoolst shlex_quotetpipestshlextquotetUserDictt collectionstUserListt UserStringtziptizipt zip_longestt izip_longestt configparsert ConfigParsertcopyregtcopy_regtdbm_gnutgdbmsdbm.gnut _dummy_threadt dummy_threadthttp_cookiejart cookielibshttp.cookiejart http_cookiestCookies http.cookiest html_entitiesthtmlentitydefss 
html.entitiest html_parsert HTMLParsers html.parsert http_clientthttplibs http.clienttemail_mime_multipartsemail.MIMEMultipartsemail.mime.multiparttemail_mime_nonmultipartsemail.MIMENonMultipartsemail.mime.nonmultiparttemail_mime_textsemail.MIMETextsemail.mime.texttemail_mime_basesemail.MIMEBasesemail.mime.basetBaseHTTPServers http.servert CGIHTTPServertSimpleHTTPServertcPickletpickletqueuetQueuetreprlibtreprt socketservert SocketServert_threadtthreadttkintertTkinterttkinter_dialogtDialogstkinter.dialogttkinter_filedialogt FileDialogstkinter.filedialogttkinter_scrolledtextt ScrolledTextstkinter.scrolledtextttkinter_simpledialogt SimpleDialogstkinter.simpledialogt tkinter_tixtTixs tkinter.tixt tkinter_ttktttks tkinter.ttkttkinter_constantst Tkconstantsstkinter.constantst tkinter_dndtTkdnds tkinter.dndttkinter_colorchooserttkColorChooserstkinter.colorchooserttkinter_commondialogttkCommonDialogstkinter.commondialogttkinter_tkfiledialogt tkFileDialogt tkinter_fontttkFonts tkinter.fontttkinter_messageboxt tkMessageBoxstkinter.messageboxttkinter_tksimpledialogttkSimpleDialogt urllib_parses.moves.urllib_parses urllib.parset urllib_errors.moves.urllib_errors urllib.errorturllibs .moves.urllibturllib_robotparsert robotparsersurllib.robotparsert xmlrpc_clientt xmlrpclibs xmlrpc.clientt xmlrpc_servertSimpleXMLRPCServers xmlrpc.servertwin32twinregt_winregsmoves.s.movestmovestModule_six_moves_urllib_parsecB@seZdZRS(s7Lazy loading of moved objects in six.moves.urllib_parse(RRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR@st ParseResultturlparset SplitResulttparse_qst parse_qslt urldefragturljointurlsplitt urlunparset urlunsplitt quote_plustunquotet unquote_plust urlencodet splitquerytsplittagt splitusert uses_fragmentt uses_netloct uses_paramst uses_queryt uses_relativesmoves.urllib_parsesmoves.urllib.parsetModule_six_moves_urllib_errorcB@seZdZRS(s7Lazy loading of moved objects in 
six.moves.urllib_error(RRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRhstURLErrorturllib2t HTTPErrortContentTooShortErrors.moves.urllib.errorsmoves.urllib_errorsmoves.urllib.errortModule_six_moves_urllib_requestcB@seZdZRS(s9Lazy loading of moved objects in six.moves.urllib_request(RRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR|sturlopensurllib.requesttinstall_openert build_openert pathname2urlt url2pathnamet getproxiestRequesttOpenerDirectortHTTPDefaultErrorHandlertHTTPRedirectHandlertHTTPCookieProcessort ProxyHandlert BaseHandlertHTTPPasswordMgrtHTTPPasswordMgrWithDefaultRealmtAbstractBasicAuthHandlertHTTPBasicAuthHandlertProxyBasicAuthHandlertAbstractDigestAuthHandlertHTTPDigestAuthHandlertProxyDigestAuthHandlert HTTPHandlert HTTPSHandlert FileHandlert FTPHandlertCacheFTPHandlertUnknownHandlertHTTPErrorProcessort urlretrievet urlcleanupt URLopenertFancyURLopenert proxy_bypasss.moves.urllib.requestsmoves.urllib_requestsmoves.urllib.requestt Module_six_moves_urllib_responsecB@seZdZRS(s:Lazy loading of moved objects in six.moves.urllib_response(RRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRstaddbasesurllib.responset addclosehooktaddinfot addinfourls.moves.urllib.responsesmoves.urllib_responsesmoves.urllib.responset#Module_six_moves_urllib_robotparsercB@seZdZRS(s=Lazy loading of moved objects in six.moves.urllib_robotparser(RRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRstRobotFileParsers.moves.urllib.robotparsersmoves.urllib_robotparsersmoves.urllib.robotparsertModule_six_moves_urllibcB@sheZdZgZejdZejdZejdZejdZ ejdZ dZ RS(sICreate a six.moves.urllib namespace that resembles the Python 3 namespacesmoves.urllib_parsesmoves.urllib_errorsmoves.urllib_requestsmoves.urllib_responsesmoves.urllib_robotparsercC@sdddddgS(NtparseterrortrequesttresponseR((R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR*s( RRRRBt _importerR8RRRRRR*(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRss 
moves.urllibcC@stt|j|dS(sAdd an item to six.moves.N(RRGR(tmove((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytadd_movescC@s^ytt|WnFtk rYytj|=WqZtk rUtd|fqZXnXdS(sRemove item from six.moves.sno such move, %rN(RRGRRt__dict__R;(R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt remove_moves  t__func__t__self__t __closure__t__code__t __defaults__t __globals__tim_functim_selft func_closuret func_codet func_defaultst func_globalscC@s |jS(N(tnext(tit((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytadvance_iterator scC@stdt|jDS(Ncs@s|]}d|jkVqdS(t__call__N(R (t.0tklass((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pys s(tanyttypet__mro__(R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytcallablescC@s|S(N((tunbound((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytget_unbound_functionscC@s|S(N((Rtcls((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytcreate_unbound_methodscC@s|jS(N(R(R"((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR#"scC@stj|||jS(N(ttypest MethodTypeR(RR((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytcreate_bound_method%scC@stj|d|S(N(R&R'R(RR$((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR%(stIteratorcB@seZdZRS(cC@st|j|S(N(Rt__next__(R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR-s(RRR(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR)+ss3Get the function out of a possibly unbound functioncK@st|j|S(N(titertkeys(tdtkw((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytiterkeys>scK@st|j|S(N(R+tvalues(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt itervaluesAscK@st|j|S(N(R+titems(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt iteritemsDscK@st|j|S(N(R+tlists(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt iterlistsGsR,R0R2cK@s |j|S(N(R/(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR/PscK@s |j|S(N(R1(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR1SscK@s 
|j|S(N(R3(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR3VscK@s |j|S(N(R5(R-R.((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR5Ystviewkeyst viewvaluest viewitemss1Return an iterator over the keys of a dictionary.s3Return an iterator over the values of a dictionary.s?Return an iterator over the (key, value) pairs of a dictionary.sBReturn an iterator over the (key, [values]) pairs of a dictionary.cC@s |jdS(Nslatin-1(tencode(ts((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytbkscC@s|S(N((R:((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytunss>BtassertCountEqualtassertRaisesRegexptassertRegexpMatchestassertRaisesRegext assertRegexcC@s|S(N((R:((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR;scC@st|jdddS(Ns\\s\\\\tunicode_escape(tunicodetreplace(R:((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR<scC@st|dS(Ni(tord(tbs((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytbyte2intscC@st||S(N(RE(tbufti((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt indexbytesstassertItemsEquals Byte literals Text literalcO@st|t||S(N(R"t_assertCountEqual(Rtargstkwargs((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR=scO@st|t||S(N(R"t_assertRaisesRegex(RRMRN((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR@scO@st|t||S(N(R"t _assertRegex(RRMRN((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRAstexeccC@sC|dkr|}n|j|k r9|j|n|dS(N(Rt __traceback__twith_traceback(RR%ttb((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytreraises   cB@sc|dkrBejd}|j}|dkr<|j}n~n|dkrW|}nddUdS(sExecute code in a namespace.isexec _code_ in _globs_, _locs_N(RR t _getframet f_globalstf_locals(t_code_t_globs_t_locs_tframe((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytexec_s      s9def reraise(tp, value, tb=None): raise tp, value, tb srdef raise_from(value, from_value): if from_value is None: raise value raise value from from_value sCdef raise_from(value, from_value): raise value from from_value cC@s |dS(N((R%t 
from_value((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt raise_fromstprintc @s|jdtjdkr%dSfd}t}|jdd}|dk rt|trpt}qt|tst dqn|jdd}|dk rt|trt}qt|tst dqn|rt dn|s0x*|D]}t|tr t}Pq q Wn|rQtd }td }n d }d }|dkrr|}n|dkr|}nx7t |D])\} }| r||n||qW||dS( s4The new-style print function for Python 2.4 and 2.5.tfileNc@st|tst|}nttrt|trjdk rtdd}|dkrrd}n|jj|}nj |dS(Nterrorststrict( R?t basestringtstrRaRCtencodingRR"R9twrite(tdataRb(tfp(s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRgs  tsepssep must be None or a stringtendsend must be None or a strings$invalid keyword arguments to print()s t ( tpopR tstdoutRtFalseR?RCtTrueRet TypeErrort enumerate( RMRNRgt want_unicodeRjRktargtnewlinetspaceRI((Ris3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytprint_sL              cO@sW|jdtj}|jdt}t|||rS|dk rS|jndS(NRatflush(tgetR RnRmRot_printRRx(RMRNRiRx((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyRw s  sReraise an exception.c@sfd}|S(Nc@s(tj|}|_|S(N(Rbtwrapst __wrapped__(tf(tassignedtupdatedtwrapped(s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytwrappers ((RR~RR((R~RRs3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR{sc@s5dffdY}tj|ddiS(s%Create a base class with a metaclass.t metaclassc@seZfdZRS(c@s||S(N((R$Rt this_basesR-(tbasestmeta(s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt__new__'s(RRR((RR(s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR%sttemporary_class((RR(RRR((RRs3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytwith_metaclass sc@sfd}|S(s6Class decorator for creating a class with a metaclass.c@s|jj}|jd}|dk rft|trE|g}nx|D]}|j|qLWn|jdd|jdd|j|j|S(Nt __slots__R t __weakref__( R tcopyRyRR?ReRmRt __bases__(R$t orig_varstslotst slots_var(R(s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyR.s   ((RR((Rs3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyt add_metaclass,s cC@sJtrFd|jkr+td|jn|j|_d|_n|S(s A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. 
To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. t__str__sY@python_2_unicode_compatible cannot be applied to %s because it doesn't define __str__().cS@s|jjdS(Nsutf-8(t __unicode__R9(R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytJt(tPY2R t ValueErrorRRR(R((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pytpython_2_unicode_compatible<s t__spec__(iiIiIill(ii(ii(ii(ii(Rt __future__RRbRLtoperatorR R&t __author__t __version__t version_infoRRtPY34Ret string_typestintt integer_typesRt class_typest text_typetbytest binary_typetmaxsizetMAXSIZERdtlongt ClassTypeRCtplatformt startswithtobjectRtlent OverflowErrorR RRRt ModuleTypeR'R+R1RRRGR(R#RRR?R7RRt_urllib_parse_moved_attributesRt_urllib_error_moved_attributesRt _urllib_request_moved_attributesRt!_urllib_response_moved_attributesRt$_urllib_robotparser_moved_attributesRR R t _meth_funct _meth_selft _func_closuret _func_codet_func_defaultst _func_globalsRRt NameErrorR!R#R'R(R%R)t attrgettertget_method_functiontget_method_selftget_function_closuretget_function_codetget_function_defaultstget_function_globalsR/R1R3R5t methodcallerR6R7R8R;R<tchrtunichrtstructtStructtpacktint2bytet itemgetterRGtgetitemRJR+t iterbytesRIRJtBytesIORLRORPtpartialRVRER=R@RAR"RMR]RRUR_RwRztWRAPPER_ASSIGNMENTStWRAPPER_UPDATESR{RRRRBt __package__tglobalsRyRtsubmodule_search_locationst meta_pathRrRItimportertappend(((s3/usr/lib/python2.7/site-packages/pip/_vendor/six.pyts               >                                                                                 5         PKe[Y __init__.pyonu[ abc@@sKdZddlmZddlZddlZddlZeZej j ej j e Z dZerGejej je dej ej (edededed ed ed ed ed ededededededededededededededededededed ed!ed"ed#ed$ed%ed&ed'ed(ed)ed*ed+ed,ed-ed.ed/ed0ndS(1s pip._vendor is for vendoring dependencies of pip to prevent needing pip to depend on something external. 
Files inside of pip._vendor should be considered immutable and should only be updated to versions from upstream. i(tabsolute_importNcC@sdjt|}y t|ttddWntk ry t|ttddWntk ruqXtj|tj|<|jdd\}}t tj||tj|nXdS(Ns{0}.{1}tlevelit.i( tformatt__name__t __import__tglobalstlocalst ImportErrortsystmodulestrsplittsetattr(t modulenamet vendored_nametbasethead((s8/usr/lib/python2.7/site-packages/pip/_vendor/__init__.pytvendoreds    s*.whlt cachecontroltcoloramatdistlibtdistrothtml5libtlockfiletsixs six.movesssix.moves.urllibt packagingspackaging.versionspackaging.specifierst pkg_resourcestprogresstretryingtrequestssrequests.packagessrequests.packages.urllib3s&requests.packages.urllib3._collectionss$requests.packages.urllib3.connections(requests.packages.urllib3.connectionpools!requests.packages.urllib3.contribs*requests.packages.urllib3.contrib.ntlmpools+requests.packages.urllib3.contrib.pyopenssls$requests.packages.urllib3.exceptionss requests.packages.urllib3.fieldss"requests.packages.urllib3.fileposts"requests.packages.urllib3.packagess/requests.packages.urllib3.packages.ordered_dicts&requests.packages.urllib3.packages.sixs5requests.packages.urllib3.packages.ssl_match_hostnamesErequests.packages.urllib3.packages.ssl_match_hostname._implementations%requests.packages.urllib3.poolmanagers!requests.packages.urllib3.requests"requests.packages.urllib3.responsesrequests.packages.urllib3.utils)requests.packages.urllib3.util.connections&requests.packages.urllib3.util.requests'requests.packages.urllib3.util.responses$requests.packages.urllib3.util.retrys#requests.packages.urllib3.util.ssl_s&requests.packages.urllib3.util.timeouts"requests.packages.urllib3.util.url(t__doc__t __future__Rtglobtos.pathtosR tFalset DEBUNDLEDtpathtabspathtdirnamet__file__t WHEEL_DIRRtjoin(((s8/usr/lib/python2.7/site-packages/pip/_vendor/__init__.pytsh    )                                          PK:e[^PP appdirs.pycnu[ abc@s@dZd,ZdjeeeZddlZddlZejddkZ e r^eZ nej j drddl Z e j ddZej d rd Zqej d rd Zqd Zn ej 
ZdddedZdddedZdddedZdddedZdddedZdddedZdefdYZdZdZdZdZed kr!yddlZ eZ!Wq!e"k ryddl#m$Z$eZ!Wqe"k ryddl%Z&eZ!Wqe"k reZ!qXqXq!Xne'dkr<dZ(dZ)d-Z*d$GHee(e)d%d&Z+x&e*D]Z,d'e,e-e+e,fGHq`Wd(GHee(e)Z+x&e*D]Z,d'e,e-e+e,fGHqWd)GHee(Z+x&e*D]Z,d'e,e-e+e,fGHqWd*GHee(d+eZ+x)e*D]Z,d'e,e-e+e,fGHqWndS(.syUtilities for determining application-specific dirs. See for details and usage. iiit.iNitjavatWindowstwin32tMactdarwintlinux2cCs6tdkr|dkr!|}n|r-dp0d}tjjt|}|r|tk rxtjj|||}qtjj||}qn{tdkrtjjd}|rtjj||}qn<tj dtjjd}|rtjj||}n|r2|r2tjj||}n|S( sJReturn full path to the user-specific data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. Typical user data directories are: macOS: ~/Library/Application Support/ Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined Win XP (not roaming): C:\Documents and Settings\\Application Data\\ Win XP (roaming): C:\Documents and Settings\\Local Settings\Application Data\\ Win 7 (not roaming): C:\Users\\AppData\Local\\ Win 7 (roaming): C:\Users\\AppData\Roaming\\ For Unix, we follow the XDG spec and support $XDG_DATA_HOME. That means, by default "~/.local/share/". 
Rt CSIDL_APPDATAtCSIDL_LOCAL_APPDATARs~/Library/Application Support/t XDG_DATA_HOMEs~/.local/shareN( tsystemtNonetostpathtnormpatht_get_win_foldertFalsetjoint expandusertgetenv(tappnamet appauthortversiontroamingtconstR ((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt user_data_dir-s&      cCstdkr|d kr!|}ntjjtd}|r|tk rftjj|||}q~tjj||}qntdkrtjjd}|rtjj||}qntj dtj jddg}g|j tj D]$}tjj|j tj ^q}|rs|rEtjj||}ng|D]}tj j||g^qL}n|rtj j|}n |d}|S|r|rtjj||}n|S( siReturn full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of data dirs should be returned. By default, the first item from XDG_DATA_DIRS is returned, or '/usr/local/share/', if XDG_DATA_DIRS is not set Typical user data directories are: macOS: /Library/Application Support/ Unix: /usr/local/share/ or /usr/share/ Win XP: C:\Documents and Settings\All Users\Application Data\\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. For Unix, this is using the $XDG_DATA_DIRS[0] default. WARNING: Do not use this on Windows. See the Vista-Fail note above for why. 
RtCSIDL_COMMON_APPDATARs/Library/Application Supportt XDG_DATA_DIRSs/usr/local/shares /usr/shareiN(R R R R RRRRRRtpathseptsplittrstriptsep(RRRt multipathR txtpathlist((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt site_data_dirds4      =.  cCstdkr$t||d|}n<tjdtjjd}|r`tjj||}n|r|rtjj||}n|S(sReturn full path to the user-specific config dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. Typical user data directories are: macOS: same as user_data_dir Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. That means, by deafult "~/.config/". RRtXDG_CONFIG_HOMEs ~/.config(RRN(R RR R RR RR(RRRRR ((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pytuser_config_dirs  cCs tdkrBt||}|r|rtjj||}qntjdd}g|jtjD]$}tjj|j tj ^qg}|r|rtjj||}ng|D]}tj j||g^q}n|rtjj|}n |d}|S(sReturn full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. 
"version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of config dirs should be returned. By default, the first item from XDG_CONFIG_DIRS is returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set Typical user data directories are: macOS: same as site_data_dir Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in $XDG_CONFIG_DIRS Win *: same as site_data_dir Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False WARNING: Do not use this on Windows. See the Vista-Fail note above for why. RRtXDG_CONFIG_DIRSs/etc/xdgi(RR( R R#R R RRRRRRR(RRRR R R!R"((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pytsite_config_dirs  =. cCsBtdkr|dkr!|}ntjjtd}|r|tk rftjj|||}ntjj||}|rtjj|d}qqn{tdkrtjjd}|rtjj||}qn<tj dtjjd}|rtjj||}n|r>|r>tjj||}n|S( sReturn full path to the user-specific cache dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Cache" to the base app data dir for Windows. See discussion below. 
Typical user cache directories are: macOS: ~/Library/Caches/ Unix: ~/.cache/ (XDG default) Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache Vista: C:\Users\\AppData\Local\\\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir` above). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. This can be disabled with the `opinion=False` option. RRtCacheRs~/Library/CachestXDG_CACHE_HOMEs~/.cacheN( R R R R RRRRRR(RRRtopinionR ((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pytuser_cache_dirs(!      cCstdkr0tjjtjjd|}n{tdkrut|||}t}|rtjj|d}qn6t|||}t}|rtjj|d}n|r|rtjj||}n|S(sReturn full path to the user-specific log dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Logs" to the base app data dir for Windows, and "log" to the base cache dir for Unix. See discussion below. 
Typical user cache directories are: macOS: ~/Library/Logs/ Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs Vista: C:\Users\\AppData\Local\\\Logs On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in examples of what some windows apps use for a logs dir.) OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` value for Windows and appends "log" to the user cache dir for Unix. This can be disabled with the `opinion=False` option. Rs~/Library/LogsRtLogstlog(R R R RRRRR+(RRRR*R ((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt user_log_dir:s     tAppDirscBs}eZdZddeedZedZedZedZ edZ edZ edZ RS( s1Convenience wrapper for getting application dirs.cCs1||_||_||_||_||_dS(N(RRRRR (tselfRRRRR ((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt__init__os     cCs%t|j|jd|jd|jS(NRR(RRRRR(R0((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyRwscCs%t|j|jd|jd|jS(NRR (R#RRRR (R0((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyR#|scCs%t|j|jd|jd|jS(NRR(R%RRRR(R0((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyR%scCs%t|j|jd|jd|jS(NRR (R'RRRR (R0((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyR'scCst|j|jd|jS(NR(R+RRR(R0((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyR+scCst|j|jd|jS(NR(R.RRR(R0((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyR.sN( t__name__t __module__t__doc__R RR1tpropertyRR#R%R'R+R.(((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyR/ms  cCs\ddl}idd6dd6dd6|}|j|jd }|j||\}}|S( sThis is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names. 
iNtAppDataRsCommon AppDataRs Local AppDataRs@Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders(t_winregtOpenKeytHKEY_CURRENT_USERt QueryValueEx(t csidl_nameR7tshell_folder_nametkeytdirttype((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt_get_win_folder_from_registrys  cCsddlm}m}|jdt||dd}yt|}t}x*|D]"}t|dkrSt}PqSqSW|ryddl }|j |}Wqt k rqXnWnt k rnX|S(Ni(tshellcontshellii( twin32com.shellRARBtSHGetFolderPathtgetattrtunicodeRtordtTruetwin32apitGetShortPathNamet ImportErrort UnicodeError(R;RARBR>t has_high_chartcRI((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt_get_win_folder_with_pywin32s$!      cCsddl}idd6dd6dd6|}|jd}|jjjd|dd |t}x*|D]"}t|d krft}PqfqfW|r|jd}|jj j |j |dr|}qn|j S( NiiRi#RiRiii( tctypestcreate_unicode_buffertwindlltshell32tSHGetFolderPathWR RRGRHtkernel32tGetShortPathNameWtvalue(R;RPt csidl_consttbufRMRNtbuf2((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt_get_win_folder_with_ctypess$   c Cs=ddl}ddlm}ddlm}|jjd}|jd|}|jj }|j dt |j |d|j j||jj|jjd}t}x*|D]"} t| dkrt}PqqW|r9|jd|}|jj } tj|||r9|jj|jjd}q9n|S(Ni(tjna(RiRNsi(tarraytcom.sunR\tcom.sun.jna.platformRtWinDeftMAX_PATHtzerostShell32tINSTANCERDR REtShlObjtSHGFP_TYPE_CURRENTtNativettoStringttostringRRRGRHtKernel32tkernalRJ( R;R]R\Rtbuf_sizeRYRBR>RMRNtkernel((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt_get_win_folder_with_jnas&  +!  
'(RRt__main__tMyAppt MyCompanyRR#R%R'R+R.s%-- app dirs (with optional 'version')Rs1.0s%s: %ss) -- app dirs (without optional 'version')s+ -- app dirs (without optional 'appauthor')s( -- app dirs (with disabled 'appauthor')R(iii(RR#R%R'R+R.(.R4t__version_info__Rtmaptstrt __version__tsysR t version_infotPY3RFtplatformt startswithtjava_vertos_nameR R RRR#R%R'RHR+R.tobjectR/R@ROR[RnRCtwin32comRRKRPRRt com.sun.jnatcomR2RRtpropstdirstpropRE(((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt s~        7B(393+                   PK:e[^PP appdirs.pyonu[ abc@s@dZd,ZdjeeeZddlZddlZejddkZ e r^eZ nej j drddl Z e j ddZej d rd Zqej d rd Zqd Zn ej ZdddedZdddedZdddedZdddedZdddedZdddedZdefdYZdZdZdZdZed kr!yddlZ eZ!Wq!e"k ryddl#m$Z$eZ!Wqe"k ryddl%Z&eZ!Wqe"k reZ!qXqXq!Xne'dkr<dZ(dZ)d-Z*d$GHee(e)d%d&Z+x&e*D]Z,d'e,e-e+e,fGHq`Wd(GHee(e)Z+x&e*D]Z,d'e,e-e+e,fGHqWd)GHee(Z+x&e*D]Z,d'e,e-e+e,fGHqWd*GHee(d+eZ+x)e*D]Z,d'e,e-e+e,fGHqWndS(.syUtilities for determining application-specific dirs. See for details and usage. iiit.iNitjavatWindowstwin32tMactdarwintlinux2cCs6tdkr|dkr!|}n|r-dp0d}tjjt|}|r|tk rxtjj|||}qtjj||}qn{tdkrtjjd}|rtjj||}qn<tj dtjjd}|rtjj||}n|r2|r2tjj||}n|S( sJReturn full path to the user-specific data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. 
See for a discussion of issues. Typical user data directories are: macOS: ~/Library/Application Support/ Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined Win XP (not roaming): C:\Documents and Settings\\Application Data\\ Win XP (roaming): C:\Documents and Settings\\Local Settings\Application Data\\ Win 7 (not roaming): C:\Users\\AppData\Local\\ Win 7 (roaming): C:\Users\\AppData\Roaming\\ For Unix, we follow the XDG spec and support $XDG_DATA_HOME. That means, by default "~/.local/share/". Rt CSIDL_APPDATAtCSIDL_LOCAL_APPDATARs~/Library/Application Support/t XDG_DATA_HOMEs~/.local/shareN( tsystemtNonetostpathtnormpatht_get_win_foldertFalsetjoint expandusertgetenv(tappnamet appauthortversiontroamingtconstR ((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt user_data_dir-s&      cCstdkr|d kr!|}ntjjtd}|r|tk rftjj|||}q~tjj||}qntdkrtjjd}|rtjj||}qntj dtj jddg}g|j tj D]$}tjj|j tj ^q}|rs|rEtjj||}ng|D]}tj j||g^qL}n|rtj j|}n |d}|S|r|rtjj||}n|S( siReturn full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of data dirs should be returned. 
By default, the first item from XDG_DATA_DIRS is returned, or '/usr/local/share/', if XDG_DATA_DIRS is not set Typical user data directories are: macOS: /Library/Application Support/ Unix: /usr/local/share/ or /usr/share/ Win XP: C:\Documents and Settings\All Users\Application Data\\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. For Unix, this is using the $XDG_DATA_DIRS[0] default. WARNING: Do not use this on Windows. See the Vista-Fail note above for why. RtCSIDL_COMMON_APPDATARs/Library/Application Supportt XDG_DATA_DIRSs/usr/local/shares /usr/shareiN(R R R R RRRRRRtpathseptsplittrstriptsep(RRRt multipathR txtpathlist((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt site_data_dirds4      =.  cCstdkr$t||d|}n<tjdtjjd}|r`tjj||}n|r|rtjj||}n|S(sReturn full path to the user-specific config dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. Typical user data directories are: macOS: same as user_data_dir Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. That means, by deafult "~/.config/". 
RRtXDG_CONFIG_HOMEs ~/.config(RRN(R RR R RR RR(RRRRR ((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pytuser_config_dirs  cCs tdkrBt||}|r|rtjj||}qntjdd}g|jtjD]$}tjj|j tj ^qg}|r|rtjj||}ng|D]}tj j||g^q}n|rtjj|}n |d}|S(sReturn full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of config dirs should be returned. By default, the first item from XDG_CONFIG_DIRS is returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set Typical user data directories are: macOS: same as site_data_dir Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in $XDG_CONFIG_DIRS Win *: same as site_data_dir Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False WARNING: Do not use this on Windows. See the Vista-Fail note above for why. RRtXDG_CONFIG_DIRSs/etc/xdgi(RR( R R#R R RRRRRRR(RRRR R R!R"((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pytsite_config_dirs  =. cCsBtdkr|dkr!|}ntjjtd}|r|tk rftjj|||}ntjj||}|rtjj|d}qqn{tdkrtjjd}|rtjj||}qn<tj dtjjd}|rtjj||}n|r>|r>tjj||}n|S( sReturn full path to the user-specific cache dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. 
Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Cache" to the base app data dir for Windows. See discussion below. Typical user cache directories are: macOS: ~/Library/Caches/ Unix: ~/.cache/ (XDG default) Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache Vista: C:\Users\\AppData\Local\\\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir` above). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. This can be disabled with the `opinion=False` option. RRtCacheRs~/Library/CachestXDG_CACHE_HOMEs~/.cacheN( R R R R RRRRRR(RRRtopinionR ((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pytuser_cache_dirs(!      cCstdkr0tjjtjjd|}n{tdkrut|||}t}|rtjj|d}qn6t|||}t}|rtjj|d}n|r|rtjj||}n|S(sReturn full path to the user-specific log dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". 
Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Logs" to the base app data dir for Windows, and "log" to the base cache dir for Unix. See discussion below. Typical user cache directories are: macOS: ~/Library/Logs/ Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs Vista: C:\Users\\AppData\Local\\\Logs On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in examples of what some windows apps use for a logs dir.) OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` value for Windows and appends "log" to the user cache dir for Unix. This can be disabled with the `opinion=False` option. Rs~/Library/LogsRtLogstlog(R R R RRRRR+(RRRR*R ((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt user_log_dir:s     tAppDirscBs}eZdZddeedZedZedZedZ edZ edZ edZ RS( s1Convenience wrapper for getting application dirs.cCs1||_||_||_||_||_dS(N(RRRRR (tselfRRRRR ((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt__init__os     cCs%t|j|jd|jd|jS(NRR(RRRRR(R0((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyRwscCs%t|j|jd|jd|jS(NRR (R#RRRR (R0((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyR#|scCs%t|j|jd|jd|jS(NRR(R%RRRR(R0((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyR%scCs%t|j|jd|jd|jS(NRR (R'RRRR (R0((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyR'scCst|j|jd|jS(NR(R+RRR(R0((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyR+scCst|j|jd|jS(NR(R.RRR(R0((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyR.sN( t__name__t __module__t__doc__R RR1tpropertyRR#R%R'R+R.(((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyR/ms  cCs\ddl}idd6dd6dd6|}|j|jd }|j||\}}|S( sThis is a fallback technique at best. 
I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names. iNtAppDataRsCommon AppDataRs Local AppDataRs@Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders(t_winregtOpenKeytHKEY_CURRENT_USERt QueryValueEx(t csidl_nameR7tshell_folder_nametkeytdirttype((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt_get_win_folder_from_registrys  cCsddlm}m}|jdt||dd}yt|}t}x*|D]"}t|dkrSt}PqSqSW|ryddl }|j |}Wqt k rqXnWnt k rnX|S(Ni(tshellcontshellii( twin32com.shellRARBtSHGetFolderPathtgetattrtunicodeRtordtTruetwin32apitGetShortPathNamet ImportErrort UnicodeError(R;RARBR>t has_high_chartcRI((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt_get_win_folder_with_pywin32s$!      cCsddl}idd6dd6dd6|}|jd}|jjjd|dd |t}x*|D]"}t|d krft}PqfqfW|r|jd}|jj j |j |dr|}qn|j S( NiiRi#RiRiii( tctypestcreate_unicode_buffertwindlltshell32tSHGetFolderPathWR RRGRHtkernel32tGetShortPathNameWtvalue(R;RPt csidl_consttbufRMRNtbuf2((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt_get_win_folder_with_ctypess$   c Cs=ddl}ddlm}ddlm}|jjd}|jd|}|jj }|j dt |j |d|j j||jj|jjd}t}x*|D]"} t| dkrt}PqqW|r9|jd|}|jj } tj|||r9|jj|jjd}q9n|S(Ni(tjna(RiRNsi(tarraytcom.sunR\tcom.sun.jna.platformRtWinDeftMAX_PATHtzerostShell32tINSTANCERDR REtShlObjtSHGFP_TYPE_CURRENTtNativettoStringttostringRRRGRHtKernel32tkernalRJ( R;R]R\Rtbuf_sizeRYRBR>RMRNtkernel((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt_get_win_folder_with_jnas&  +!  
'(RRt__main__tMyAppt MyCompanyRR#R%R'R+R.s%-- app dirs (with optional 'version')Rs1.0s%s: %ss) -- app dirs (without optional 'version')s+ -- app dirs (without optional 'appauthor')s( -- app dirs (with disabled 'appauthor')R(iii(RR#R%R'R+R.(.R4t__version_info__Rtmaptstrt __version__tsysR t version_infotPY3RFtplatformt startswithtjava_vertos_nameR R RRR#R%R'RHR+R.tobjectR/R@ROR[RnRCtwin32comRRKRPRRt com.sun.jnatcomR2RRtpropstdirstpropRE(((s7/usr/lib/python2.7/site-packages/pip/_vendor/appdirs.pyt s~        7B(393+                   PK.e[s{ re-vendor.pyonu[ abc@sddlZddlZddlZddlZddlZejjejjeZ dZ dZ dZ e dkreejdkre nejddkre qejdd kre qe ndS( iNcCsdGHtjddS(Ns"Usage: re-vendor.py [clean|vendor]i(tsystexit(((s9/usr/lib/python2.7/site-packages/pip/_vendor/re-vendor.pytusage scCsqxNtjtD]=}tjjt|}tjj|rtj|qqWtjtjjtddS(Nssix.py( tostlistdirtheretpathtjointisdirtshutiltrmtreetunlink(tfntdirname((s9/usr/lib/python2.7/site-packages/pip/_vendor/re-vendor.pytclean s cCsGtjddtddgx$tjdD]}tj|q,WdS(Ntinstalls-ts-rs vendor.txts *.egg-info(tpiptmainRtglobR R (R ((s9/usr/lib/python2.7/site-packages/pip/_vendor/re-vendor.pytvendorst__main__iiRR(RRRRR RtabspathR t__file__RRRRt__name__tlentargv(((s9/usr/lib/python2.7/site-packages/pip/_vendor/re-vendor.pyts            PK.e[` re-vendor.pynu[import os import sys import pip import glob import shutil here = os.path.abspath(os.path.dirname(__file__)) def usage(): print("Usage: re-vendor.py [clean|vendor]") sys.exit(1) def clean(): for fn in os.listdir(here): dirname = os.path.join(here, fn) if os.path.isdir(dirname): shutil.rmtree(dirname) # six is a single file, not a package os.unlink(os.path.join(here, 'six.py')) def vendor(): pip.main(['install', '-t', here, '-r', 'vendor.txt']) for dirname in glob.glob('*.egg-info'): shutil.rmtree(dirname) if __name__ == '__main__': if len(sys.argv) != 2: usage() if sys.argv[1] == 'clean': clean() elif sys.argv[1] == 'vendor': vendor() else: usage() PK.e[wL&d&drequests/utils.pyonu[ 
abc@s\dZddlZddlZddlZddlZddlZddlZddlZddlZddl Z ddl Z ddl Z ddl m Z ddl mZddlmZddlmZddlmZmZmZmZmZmZmZmZmZmZmZmZm Z m!Z!dd l"m#Z#dd l$m%Z%dd l&m'Z'm(Z(m)Z)m*Z*d@Z+ej,Z-idd6dd6Z.ej/dkrdZ0dZndZ1dZ2e3dZ4dZ5dZ6dZ7dZ8dZ9e3dZ:dZ;dZ<d Z=d!Z>d"Z?d#Z@d$ZAeBd%d&ZCd'ZDd(ZEd)ZFd*ZGd+ZHd,ZIejJd-ZKd.ZLdd/ZNd0ZOd1d2ZPd3ZQd4ZRd5jSd6ZTeTd7ZUeTd8ZVd9ZWd:ZXd;ZYejZd<Z[ejZd<Z\d=Z]d>Z^d?Z_dS(As requests.utils ~~~~~~~~~~~~~~ This module provides utility functions that are used within Requests that are also useful for external consumption. iNi(t __version__(tcerts(tto_native_string(tparse_http_list(tquoteturlparsetbyteststrt OrderedDicttunquotet getproxiest proxy_bypasst urlunparset basestringt integer_typestis_py3tproxy_bypass_environmenttgetproxies_environment(tcookiejar_from_dict(tCaseInsensitiveDict(t InvalidURLt InvalidHeadertFileModeWarningtUnrewindableBodyErrors.netrct_netrciPthttpithttpstWindowscCs"trddl}n ddl}yE|j|jd}|j|dd}|j|dd}Wntk rztSX| s| rtS|jd}x|D]w}|dkrd|krt Sn|j dd }|j d d }|j d d}t j ||t j rt SqWtS( Nis;Software\Microsoft\Windows\CurrentVersion\Internet Settingst ProxyEnableit ProxyOverridet;st.s\.t*s.*t?(Rtwinregt_winregtOpenKeytHKEY_CURRENT_USERt QueryValueExtOSErrortFalsetsplittTruetreplacetretmatchtI(thostR"tinternetSettingst proxyEnablet proxyOverridettest((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytproxy_bypass_registry.s2          cCs!trt|St|SdS(sReturn True, if the host should be bypassed. Checks proxy settings gathered from the environment, if specified, or the registry. 
N(RRR4(R/((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyR Os  cCs"t|dr|j}n|S(s/Returns an internal sequence dictionary update.titems(thasattrR5(td((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytdict_to_sequence[scCsd}d}t|dr*t|}nt|drE|j}nmt|dry|j}Wntjk rzqXtj|j}d|j krt j dt qnt|drty|j }Wn,ttfk r|dk rq|}qqqtXt|drt|dkrty3|jdd |j }|j|pIdWqqttfk rmd}qqXqtn|dkrd}ntd||S( Nit__len__tlentfilenotbs%Requests has determined the content-length for this request using the binary size of the file: however, the file has been opened in text mode (i.e. without the 'b' flag in the mode). This may lead to an incorrect content-length. In Requests 3.0, support will be removed for files in text mode.ttelltseeki(tNoneR6R:R;tiotUnsupportedOperationtostfstattst_sizetmodetwarningstwarnRR=R'tIOErrorR>tmax(tot total_lengthtcurrent_positionR;((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt super_lends@       c CseyGddlm}m}d}x^tD]V}ytjjdj|}Wntk r_dSXtjj |r&|}Pq&q&W|dkrdSt |}d}t |t r|j d}n|jj|d} yG||j| } | r| drdnd} | | | d fSWn#|tfk rE|rFqFnXWnttfk r`nXdS( s;Returns the Requests tuple auth for a given url from netrc.i(tnetrctNetrcParseErrors~/{0}Nt:tasciiiii(RNROR?t NETRC_FILESRBtpatht expandusertformattKeyErrortexistsRt isinstanceRtdecodetnetlocR)tauthenticatorsRHt ImportErrortAttributeError( turlt raise_errorsRNROt netrc_pathtftloctritsplitstrR/Rtlogin_i((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytget_netrc_auths8    cCs[t|dd}|rWt|trW|ddkrW|ddkrWtjj|SdS(s0Tries to guess the filename of the given object.tnameitN(tgetattrR?RXR RBRStbasename(tobjRg((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytguess_filenames%cCsD|dkrdSt|ttttfr:tdnt|S(sTake an object and test to see if it can be represented as a dictionary. 
Unless it can not be represented as such, return an OrderedDict, e.g., :: >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') ValueError: need more than 1 value to unpack >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) :rtype: OrderedDict s+cannot encode objects that are not 2-tuplesN(R?RXRRtbooltintt ValueErrorR(tvalue((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytfrom_key_val_lists  cCse|dkrdSt|ttttfr:tdnt|tjr[|j }nt |S(sTake an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_list([('key', 'val')]) [('key', 'val')] >>> to_key_val_list({'key': 'val'}) [('key', 'val')] >>> to_key_val_list('string') ValueError: cannot encode objects that are not 2-tuples. :rtype: list s+cannot encode objects that are not 2-tuplesN( R?RXRRRnRoRpt collectionstMappingR5tlist(Rq((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytto_key_val_lists cCshg}x[t|D]M}|d |dko8dknrSt|dd!}n|j|qW|S(sParse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. It basically works like :func:`parse_set_header` just that items may appear multiple times and case sensitivity is preserved. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] To create a header from the :class:`list` again, use the :func:`dump_header` function. :param value: a string with a list header. 
:return: :class:`list` :rtype: list iit"(t_parse_list_headertunquote_header_valuetappend(Rqtresulttitem((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytparse_list_headers $cCsi}xt|D]~}d|kr5d||>> d = parse_dict_header('foo="is a fish", bar="as well"') >>> type(d) is dict True >>> sorted(d.items()) [('bar', 'as well'), ('foo', 'is a fish')] If there is no value for a key it will be `None`: >>> parse_dict_header('key_without_value') {'key_without_value': None} To create a header from the :class:`dict` again, use the :func:`dump_header` function. :param value: a string with a dict header. :return: :class:`dict` :rtype: dict t=iiRwN(RxR?R)Ry(RqR{R|Rg((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytparse_dict_header1s  $cCsq|rm|d|dko%dknrm|dd!}| sN|d dkrm|jddjddSn|S( sUnquotes a header value. (Reversal of :func:`quote_header_value`). This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. :rtype: str iiRwiis\\s\s\"(R+(Rqt is_filename((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyRyTs * cCs+i}x|D]}|j||j/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytdict_from_cookiejarms cCs t||S(sReturns a CookieJar from a key/value dictionary. :param cj: CookieJar to insert cookies into. :param cookie_dict: Dict of key/values to insert into CookieJar. :rtype: CookieJar (R(RR((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytadd_dict_to_cookiejar|scCsvtjdttjddtj}tjddtj}tjd}|j||j||j|S(slReturns encodings from given content string. :param content: bytestring to extract encodings from. sIn requests 3.0, get_encodings_from_content will be removed. For more information, please see the discussion on issue #2266. 
(This warning should only appear once.)s!]tflagss+]s$^<\?xml.*?encoding=["\']*(.+?)["\'>](RFRGtDeprecationWarningR,tcompileR.tfindall(tcontentt charset_ret pragma_retxml_re((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytget_encodings_from_contentscCs_|jd}|sdStj|\}}d|krK|djdSd|kr[dSdS(s}Returns encodings from given HTTP Header Dict. :param headers: dictionary to extract encoding from. :rtype: str s content-typetcharsets'"ttexts ISO-8859-1N(tgetR?tcgit parse_headertstrip(theaderst content_typetparams((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytget_encoding_from_headerss  ccs|jdkr)x|D] }|VqWdStj|jdd}x+|D]#}|j|}|rK|VqKqKW|jddt}|r|VndS(sStream decodes a iterator.NterrorsR+ttfinal(tencodingR?tcodecstgetincrementaldecoderRYR*(titeratortrR|tdecodertchunktrv((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytstream_decode_response_unicodes    ccsdd}|dks|dkr-t|}nx0|t|kr_||||!V||7}q0WdS(s Iterate over slices of a string.iN(R?R:(tstringt slice_lengthtpos((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt iter_slicess cCstjdtg}t|j}|rcyt|j|SWqctk r_|j|qcXnyt|j|ddSWnt k r|jSXdS(sReturns the requested content back in unicode. :param r: Response object to get unicode content from. Tried: 1. charset from content-type 2. fall back and replace all unicode characters :rtype: str sIn requests 3.0, get_unicode_from_response will be removed. For more information, please see the discussion on issue #2266. (This warning should only appear once.)RR+N( RFRGRRRRRt UnicodeErrorRzt TypeError(Rttried_encodingsR((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytget_unicode_from_responses   t4ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzs0123456789-._~cCs|jd}xtdt|D]}||dd!}t|dkr|jrytt|d}Wn!tk rtd|nX|tkr|||d||/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytunquote_unreserveds  cCsKd}d}ytt|d|SWntk rFt|d|SXdS(sRe-quote the given URI. 
This function passes the given URI through an unquote/quote cycle to ensure that it is fully and consistently quoted. :rtype: str s!#$%&'()*+,/:;=?@[]~s!#$&'()*+,/:;=?@[]~tsafeN(RRR(Rtsafe_with_percenttsafe_without_percent((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt requote_uri s  cCstjdtj|d}|jd\}}tjdtjtt|d}tjdtj|d|@}||@||@kS(sThis function allows you to check if an IP belongs to a network subnet Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 :rtype: bool s=Lit/(tstructtunpacktsockett inet_atonR)tdotted_netmaskRo(tiptnettipaddrtnetaddrtbitstnetmasktnetwork((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytaddress_in_network#s +#cCs/ddd|>dA}tjtjd|S(sConverts mask from /xx format to xxx.xxx.xxx.xxx Example: if mask is 24 function returns 255.255.255.0 :rtype: str Iii s>I(Rt inet_ntoaRtpack(tmaskR((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyR2scCs-ytj|Wntjk r(tSXtS(s :rtype: bool (RRterrorR(R*(t string_ip((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytis_ipv4_address=s cCs|jddkryt|jdd}Wntk rFtSX|dks_|dkrctSytj|jddWqtjk rtSXntStS(sV Very simple check of the cidr format in no_proxy variable. :rtype: bool Rii i( tcountRoR)RpR(RRRR*(tstring_networkR((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt is_valid_cidrHs ccst|dk }|r4tjj|}|tj|/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt set_environ`s    c Cscd}|}|d kr*|d}nt|j}|r d|jddjdD}|jdd}t|rx|D]8}t|rt||rtSq||krtSqWq x@|D]5}|j |s|jddj |rtSqWnt d|8yt |}Wn t t jfk rNt}nXWd QX|r_tStS( sL Returns whether we should bypass proxies or not. 
:rtype: bool cSs(tjj|p'tjj|jS(N(RBRRtupper(tk((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt|Rtno_proxycss|]}|r|VqdS(N((t.0R/((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pys st Rt,RPiN(R?RRZR+R)RRRR*tendswithRR RRtgaierrorR(( R^Rt get_proxyt no_proxy_argRZRtproxy_ipR/tbypass((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytshould_bypass_proxiesvs4  %      + cCs!t|d|riStSdS(sA Return a dict of environment proxies. :rtype: dict RN(RR (R^R((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytget_environ_proxiesscCs|p i}t|}|jdkrC|j|j|jdS|jd|j|jd|jdg}d}x(|D] }||krz||}PqzqzW|S(sSelect a proxy for the url, if applicable. :param url: The url being for the request :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs talls://sall://N(RthostnameR?Rtscheme(R^tproxiesturlpartst proxy_keystproxyt proxy_key((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt select_proxys       spython-requestscCsd|tfS(sO Return a string representing the default user agent. 
:rtype: str s%s/%s(R(Rg((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytdefault_user_agentscCs2titd6djd d6dd6dd 6S( s9 :rtype: requests.structures.CaseInsensitiveDict s User-Agents, tgziptdeflatesAccept-Encodings*/*tAccepts keep-alivet Connection(RR(RRR(((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytdefault_headerss  c Csg}d}xtjd|D]}y|jdd\}}Wntk ra|d}}nXi|jdd6}xa|jdD]P}y|jd\}}Wntk rPnX|j|||j|; rel=front; type="image/jpeg",; rel=back;type="image/jpeg" :rtype: list s '"s, * '"R^R~(R,R)RpRRz( Rqtlinkst replace_charstvalR^Rtlinktparamtkey((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytparse_header_linkss    sRQiicCs|d }|tjtjfkr&dS|d tjkr=dS|d tjtjfkr]dS|jt}|dkr|dS|dkr|d d dtkrd S|d d dtkrd Sn|dkr|d t krd S|d t krdSnd S(s :rtype: str isutf-32is utf-8-sigisutf-16isutf-8Ns utf-16-beis utf-16-les utf-32-bes utf-32-le( Rt BOM_UTF32_LEt BOM_UTF32_BEtBOM_UTF8t BOM_UTF16_LEt BOM_UTF16_BERt_nullt_null2t_null3R?(tdatatsamplet nullcount((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytguess_json_utfs*    cCsSt||\}}}}}}|s7||}}nt||||||fS(sGiven a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument. :rtype: str (RR (R^t new_schemeRRZRSRtquerytfragment((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytprepend_scheme_if_needed1s!cCsRt|}y"t|jt|jf}Wnttfk rMd}nX|S(s{Given a url with authentication components, extract them into a tuple of username,password. :rtype: (str,str) R(RR(RR tusernametpasswordR]R(R^tparsedtauth((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytget_auth_from_urlBs  " s^\S[^\r\n]*$|^$cCs|\}}t|tr$t}nt}y&|j|sOtd|nWn0tk rtd||t|fnXdS(sVerifies that header value is a string which doesn't contain leading whitespace or return characters. This prevents unintended header injection. :param header: tuple, in the format (name, value). 
s7Invalid return character or leading space in header: %ss>Value for header {%s: %s} must be of type str or bytes, not %sN(RXRt_CLEAN_HEADER_REGEX_BYTEt_CLEAN_HEADER_REGEX_STRR-RRttype(theaderRgRqtpat((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytcheck_header_validityWs   cCsft|\}}}}}}|s4||}}n|jddd}t|||||dfS(sW Given a url remove the fragment and the authentication part. :rtype: str t@iiR(RtrsplitR (R^RRZRSRR R ((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt urldefragauthls cCs}t|jdd}|dk rmt|jtrmy||jWqyttfk ritdqyXn tddS(sfMove file pointer back to its recorded starting position so it can be read again on redirect. R>s;An error occurred when rewinding request body for redirect.s+Unable to rewind request body for redirect.N( RjtbodyR?RXt_body_positionRRHR'R(tprepared_requestt body_seek((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt rewind_body}s(s.netrcR(`t__doc__RRRst contextlibR@RBtplatformR,RRRFRRRt_internal_utilsRtcompatRRxRRRRRR R R R R RRRRtcookiesRt structuresRt exceptionsRRRRRRtwheretDEFAULT_CA_BUNDLE_PATHt DEFAULT_PORTStsystemR4R8RMR(RfRmRrRvR}RRyRRRRRRRt frozensetRRRRRRRtcontextmanagerRRR?RRRRRtencodeRRRR RRRRRRRR!(((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt s           ^"  ! = 3    #      %      9  "      PK.e[>l55requests/compat.pycnu[ abc@s5dZddlmZddlZejZeddkZeddkZddlZerGddl m Z m Z m Z m Z mZmZmZmZmZddlmZmZmZmZmZdd lmZddlZdd lmZdd lmZdd lmZe Z!e Z"e#Z e$Z$e%e&e'fZ(e%e&fZ)ner1dd l*mZmZmZmZmZm Z m Z m Z m Z mZddl+mZmZmZmZmZddl,m-Zdd l.mZdd l/mZdd l0mZe Z!e Z e"Z"e e"fZ$e%e'fZ(e%fZ)ndS(sq requests.compat ~~~~~~~~~~~~~~~ This module handles import compatibility issues between Python 2 and Python 3. 
i(tchardetNiii( tquotetunquotet quote_plust unquote_plust urlencodet getproxiest proxy_bypasstproxy_bypass_environmenttgetproxies_environment(turlparset urlunparseturljointurlsplitt urldefrag(tparse_http_list(tMorsel(tStringIO(t OrderedDict( R R R R RRRRRR(RRRRR (t cookiejar(1t__doc__t pip._vendorRtsyst version_infot_vertis_py2tis_py3tjsonturllibRRRRRRRRR R R R R Rturllib2Rt cookielibtCookieRRt)pip._vendor.urllib3.packages.ordered_dictRtstrt builtin_strtbytestunicodet basestringtinttlongtfloatt numeric_typest integer_typest urllib.parseturllib.requestthttpRt http.cookiestiot collections(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/compat.pyt sB   @( F(  PK.e[!requests/status_codes.pycnu[ abc@skddlmZiDdd6dd6dd6dd 6dd 6dd6dd6dd6dd6dd6dd 6dd#6dd(6dd*6dd,6dd.6dd26dd46dd76dd96dd;6dd=6ddA6ddE6ddH6ddJ6ddM6ddO6ddR6ddU6ddW6dd[6dd^6dd`6ddb6ddd6ddg6ddi6ddk6ddo6dds6ddu6ddy6dd{6dd~6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6ZeddZxcejD]U\ZZxFeD]>Zeeeeej ds!eeej eq!q!WqWdS(i(t LookupDicttcontinueidtswitching_protocolsiet processingift checkpointigt uri_too_longtrequest_uri_too_longiztoktokaytall_oktall_okaytall_goods\o/s✓itcreateditaccepteditnon_authoritative_infotnon_authoritative_informationit no_contentit reset_contenttresetitpartial_contenttpartialit multi_statustmultiple_statust multi_statitmultiple_statiitalready_reporteditim_useditmultiple_choicesi,tmoved_permanentlytmoveds\o-i-tfoundi.t see_othertotheri/t not_modifiedi0t use_proxyi1t switch_proxyi2ttemporary_redirectttemporary_movedt temporaryi3tpermanent_redirecttresume_incompletetresumei4t bad_requesttbadit unauthorizeditpayment_requiredtpaymentit forbiddenit not_founds-o-itmethod_not_allowedt not_alloweditnot_acceptableitproxy_authentication_requiredt proxy_authtproxy_authenticationitrequest_timeoutttimeoutitconflictitgoneitlength_requireditprecondition_failedt preconditionitrequest_entity_too_largeitrequest_uri_too_largeitunsupported_media_typetunsupported_mediat 
media_typeitrequested_range_not_satisfiabletrequested_rangetrange_not_satisfiableitexpectation_failedit im_a_teapottteapott i_am_a_teapotitmisdirected_requestitunprocessable_entityt unprocessableitlockeditfailed_dependencyt dependencyitunordered_collectiont unordereditupgrade_requiredtupgradeitprecondition_requiredittoo_many_requeststtoo_manyitheader_fields_too_largetfields_too_largeit no_responsetnoneit retry_withtretryit$blocked_by_windows_parental_controlstparental_controlsitunavailable_for_legal_reasonst legal_reasonsitclient_closed_requestitinternal_server_errort server_errors/o\s✗itnot_implementedit bad_gatewayitservice_unavailablet unavailableitgateway_timeoutithttp_version_not_supportedt http_versionitvariant_also_negotiatesitinsufficient_storageitbandwidth_limit_exceededt bandwidthit not_extendeditnetwork_authentication_requiredt network_authtnetwork_authenticationitnamet status_codess\t/N(R(R(R(R(RR(RRR R R s\o/s✓(R (R (RR(R(RR(RR(RRRR(R(R(R(RRs\o-(R(RR (R!(R"(R#(R$R%R&(R'R(R)(R*R+(R,(R-R.(R/(R0s-o-(R1R2(R3(R4R5R6(R7R8(R9(R:(R;(R<R=(R>(R?(R@RARB(RCRDRE(RF(RGRHRI(RJ(RKRL(RM(RNRO(RPRQ(RRRS(RTR=(RURV(RWRX(RYRZ(R[R\(R]R^(R_R`(Ra(RbRcs/o\s✗(Rd(Re(RfRg(Rh(RiRj(Rk(Rl(RmRn(Ro(RpRqRr(s\Ru( t structuresRt_codestcodestitemstcodettitlesttitletsetattrt startswithtupper(((sE/usr/lib/python2.7/site-packages/pip/_vendor/requests/status_codes.pyts  PK.e[c/]]requests/api.pynu[# -*- coding: utf-8 -*- """ requests.api ~~~~~~~~~~~~ This module implements the Requests API. :copyright: (c) 2012 by Kenneth Reitz. :license: Apache2, see LICENSE for more details. """ from . import sessions def request(method, url, **kwargs): """Constructs and sends a :class:`Request `. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. 
:param data: (optional) Dictionary or list of tuples ``[(key, value)]`` (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers to add for the file. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How many seconds to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. :param stream: (optional) if ``False``, the response content will be immediately downloaded. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. 
:return: :class:`Response ` object :rtype: requests.Response Usage:: >>> import requests >>> req = requests.request('GET', 'http://httpbin.org/get') """ # By using the 'with' statement we are sure the session is closed, thus we # avoid leaving sockets open which can trigger a ResourceWarning in some # cases, and look like a memory leak in others. with sessions.Session() as session: return session.request(method=method, url=url, **kwargs) def get(url, params=None, **kwargs): r"""Sends a GET request. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return request('get', url, params=params, **kwargs) def options(url, **kwargs): r"""Sends an OPTIONS request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return request('options', url, **kwargs) def head(url, **kwargs): r"""Sends a HEAD request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', False) return request('head', url, **kwargs) def post(url, data=None, json=None, **kwargs): r"""Sends a POST request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. 
:return: :class:`Response ` object :rtype: requests.Response """ return request('post', url, data=data, json=json, **kwargs) def put(url, data=None, **kwargs): r"""Sends a PUT request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response """ return request('put', url, data=data, **kwargs) def patch(url, data=None, **kwargs): r"""Sends a PATCH request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response """ return request('patch', url, data=data, **kwargs) def delete(url, **kwargs): r"""Sends a DELETE request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response """ return request('delete', url, **kwargs) PK.e[6dWdWrequests/sessions.pycnu[ abc@s+dZddlZddlZddlZddlmZddlmZddlm Z ddl m Z m Z m Z mZmZddlmZmZmZmZdd lmZmZmZdd lmZmZdd lmZdd lmZm Z dd l!m"Z"m#Z#m$Z$m%Z%ddl&m'Z'ddl(m)Z)ddlm*Z*m+Z+m,Z,m-Z-m.Z.m/Z/m0Z0ddl1m2Z2ddlm3Z3ej4dkry ej5Z6Wne7k rej8Z6nXn ejZ6e dZ9e dZ:de;fdYZ<de<fdYZ=dZ>dS(s requests.session ~~~~~~~~~~~~~~~~ This module provides a Session object to manage and persist settings across requests (cookies, auth, proxies). 
iN(tMapping(t timedeltai(t_basic_auth_str(t cookielibtis_py3t OrderedDictturljointurlparse(tcookiejar_from_dicttextract_cookies_to_jartRequestsCookieJart merge_cookies(tRequesttPreparedRequesttDEFAULT_REDIRECT_LIMIT(t default_hookst dispatch_hook(tto_native_string(tto_key_val_listtdefault_headers(tTooManyRedirectst InvalidSchematChunkedEncodingErrortContentDecodingError(tCaseInsensitiveDict(t HTTPAdapter(t requote_uritget_environ_proxiestget_netrc_authtshould_bypass_proxiestget_auth_from_urlt rewind_bodyt DEFAULT_PORTS(tcodes(tREDIRECT_STATItWindowscCs|dkr|S|dkr |St|to;t|tsB|S|t|}|jt|g|jD]\}}|dkrt|^qt}x|D] }||=qW|S(sDetermines appropriate setting for a given request, taking into account the explicit setting on that request, and the setting in the session. If a setting is a dictionary, they will be merged together using `dict_class` N(tNonet isinstanceRRtupdatetitems(trequest_settingtsession_settingt dict_classtmerged_settingtktvt none_keystkey((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt merge_setting2s  1  cCsZ|dks!|jdgkr%|S|dksF|jdgkrJ|St|||S(sProperly merges both requests and session hooks. This is necessary because when request_hooks == {'response': []}, the merge breaks Session hooks entirely. tresponseN(R$tgetR0(t request_hookst session_hooksR*((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt merge_hooksQs !!tSessionRedirectMixincBsPeZdZdZededdedZdZdZ dZ RS(cCs?|jr;|jd}tr.|jd}nt|dSdS(s7Receives a Response. 
Returns a redirect URI or ``None``tlocationtlatin1tutf8N(t is_redirecttheadersRtencodeRR$(tselftrespR7((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytget_redirect_targetbs    cCst|}t|}|j|jkr.tS|jdkrn|jdkrn|jdkrn|jdkrntS|j|jk}|j|jk}tj|jddf}| r|j|kr|j|krtS|p|S(sFDecide whether Authorization header should be removed when redirectingthttpiPthttpsiN(iPN(iN( RthostnametTruetschemetportR$tFalseR R2(R=told_urltnew_urlt old_parsedt new_parsedt changed_porttchanged_schemet default_port((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytshould_strip_authxs  c ksg} |j|} x| r|j} | j|| d|_y |jWn-tttfk r~|jj dt nXt |j|j krt d|j d|n|j| jdrt|j} dt| j| f} nt| }|j} |js3t|jt| } n t| } t| | _|j| ||jtjtjfkrd}x!|D]}| jj|dqWd| _ n| j}y |d =Wnt!k rnXt"| j#||jt$| j#|j%| j&| j#|j'| |}|j(| || j)dk oVd|kpVd |k}|rlt*| n| }|r|Vq|j+|d |d |d |d|d|dt | }t"|j%| |j|j|} |VqWdS(sBReceives a Response. Returns a generator of Responses or Requests.itdecode_contentsExceeded %s redirects.R1s//s%s:%ssContent-Lengths Content-TypesTransfer-EncodingtCookietstreamttimeouttverifytcerttproxiestallow_redirectsN(sContent-Lengths Content-TypesTransfer-Encoding(,R?tcopytappendthistorytcontentRRt RuntimeErrortrawtreadRFtlent max_redirectsRtcloset startswithRturlRRDtgeturltnetlocRRtrebuild_methodt status_codeR!ttemporary_redirecttpermanent_redirectR;tpopR$tbodytKeyErrorR t_cookiesR tcookiestprepare_cookiestrebuild_proxiest rebuild_autht_body_positionRtsend(R=R>treqRQRRRSRTRUtyield_requeststadapter_kwargsthistRbtprepared_requestt parsed_rurltparsedtpurged_headerstheaderR;t rewindable((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytresolve_redirectssr                 cCs{|j}|j}d|kr@|j|jj|r@|d=n|jrUt|nd}|dk rw|j|ndS(sWhen being redirected we may want to strip authentication from the request to avoid leaking credentials. This method intelligently removes and reapplies authentication where possible to avoid credential loss. 
t AuthorizationN(R;RbRNtrequestt trust_envRR$t prepare_auth(R=RwR1R;Rbtnew_auth((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyRps  $  c Cs5|dk r|ni}|j}|j}t|j}|j}|jd}t|d|}|jr| rt |d|} | j|| jd} | r|j || qnd|kr|d=nyt ||\} } Wnt k rd\} } nX| r1| r1t | | |d>> import requests >>> s = requests.Session() >>> s.get('http://httpbin.org/get') Or as a context manager:: >>> with requests.Session() as s: >>> s.get('http://httpbin.org/get') R;RmtauthRUthookstparamsRSRTtprefetchtadaptersRQRR_cCst|_d|_i|_t|_i|_t|_ t |_ d|_ t |_t |_ti|_t|_|jdt|jdtdS(Nshttps://shttp://(RR;R$RRURRRRFRQRCRSRTRR_RRRmRRtmountR(R=((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt__init__js           cCs|S(N((R=((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt __enter__scGs|jdS(N(R`(R=targs((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt__exit__scCs*|jp i}t|tjs0t|}nttt|j|}|j}|jr| r|j rt |j }nt }|j d|j jd|j d|jd|jd|jdt|j|jdtdt|j|jd t||jd |d t|j|j |S( sConstructs a :class:`PreparedRequest ` for transmission and returns it. The :class:`PreparedRequest` has settings merged from the :class:`Request ` instance and those of the :class:`Session`. :param request: :class:`Request` instance to prepare with this session's settings. :rtype: requests.PreparedRequest RRbtfilestdatatjsonR;R*RRRmR(RmR%Rt CookieJarRR R RRRRbR tprepareRtupperRRRR0R;RRR5R(R=RRmtmerged_cookiesRtp((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytprepare_requests*        cCstd|jd|d|d|d|p-id|d|p?id|d |d | }|j|}| poi} |j|j| | ||}i| d 6| d 6}|j||j||}|S( sConstructs a :class:`Request `, prepares it and sends it. Returns :class:`Response ` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. 
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'filename': file-like-objects`` for multipart encoding upload. :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Set to True by default. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol or protocol and hostname to the URL of the proxy. :param stream: (optional) whether to immediately download the response content. Defaults to ``False``. :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. :rtype: requests.Response RRbR;RRRRRRmRRRRV(R RRtmerge_environment_settingsRbR&Rr(R=RRbRRR;RmRRRRRVRURRQRSRTRRstpreptsettingst send_kwargsR>((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyRs*)       cKs#|jdt|jd||S(sSends a GET request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response RVR(RRCR(R=Rbtkwargs((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyR2scKs#|jdt|jd||S(sSends a OPTIONS request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. 
:param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response RVtOPTIONS(RRCR(R=RbR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytoptions!scKs#|jdt|jd||S(sSends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response RVR(RRFR(R=RbR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pythead,scKs|jd|d|d||S(sSends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response RRR(R(R=RbRRR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytpost7s cKs|jd|d||S(sYSends a PUT request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response tPUTR(R(R=RbRR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytputCs cKs|jd|d||S(s[Sends a PATCH request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response tPATCHR(R(R=RbRR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytpatchNs cKs|jd||S(sSends a DELETE request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. 
:rtype: requests.Response tDELETE(R(R=RbR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytdeleteYsc Ks|jd|j|jd|j|jd|j|jd|jt|trjtdn|jdt }|j d}|j }|j d|j }t}|j||}t|} td| |_td |||}|jr1x-|jD]} t|j| j| jq Wnt|j||j|j|||} |r{g| D]} | ^qing} | r| jd || j}| |_n|sy(t|j||d t ||_Wqtk rqXn|s|jn|S( sISend a given PreparedRequest. :rtype: requests.Response RQRSRTRUs#You can only send PreparedRequests.RVRbtsecondsR1iRt(RRQRSRTRUR%R t ValueErrorRiRCR2Rt get_adapterRbtpreferred_clockRrRtelapsedRRYR RmRR\R}tinserttnextt_nextt StopIterationRZ( R=RRRVRQRtadaptertstarttrRR>tgenRY((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyRrcsB     %  (  c Cs|jr|dk r$|jdnd}t|d|}x*|jD]\}} |j|| qIW|tks|dkrtjjdptjjd}qnt ||j }t ||j }t ||j }t ||j }i|d6|d6|d6|d6S( s^ Check the environment and merge it with some settings. :rtype: dict RtREQUESTS_CA_BUNDLEtCURL_CA_BUNDLERSRURQRTN(RR$R2RR'RRCtostenvironR0RURQRSRT( R=RbRURQRSRTRt env_proxiesR,R-((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyRs !cCsMx6|jjD]%\}}|jj|r|SqWtd|dS(s~ Returns the appropriate connection adapter for the given URL. :rtype: requests.adapters.BaseAdapter s*No connection adapters were found for '%s'N(RR'tlowerRaR(R=RbtprefixR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyRscCs(x!|jjD]}|jqWdS(s+Closes all adapters and as such the sessionN(RtvaluesR`(R=R-((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyR`scCso||j|s(tdictt __attrs__(R=tstate((R=sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt __getstate__scCs1x*|jD]\}}t|||q WdS(N(R'tsetattr(R=RRtvalue((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt __setstate__sN(RRt__doc__RRRRRR$RCRR2RRRRRRRrRRR`RRR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyRQs2  7   ) D  I    cCstS(sQ Returns a :class:`Session` for context-management. 
:rtype: Session (R(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytsessions(?RRtplatformttimet collectionsRtdatetimeRRRtcompatRRRRRRmRR R R tmodelsR R RRRRt_internal_utilsRtutilsRRt exceptionsRRRRt structuresRRRRRRRRRR t status_codesR!R"tsystemt perf_counterRtAttributeErrortclockR0R5tobjectR6RR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt s<   (""4     PK.e[|y%requests/__version__.pynu[# .-. .-. .-. . . .-. .-. .-. .-. # |( |- |.| | | |- `-. | `-. # ' ' `-' `-`.`-' `-' `-' ' `-' __title__ = 'requests' __description__ = 'Python HTTP for Humans.' __url__ = 'http://python-requests.org' __version__ = '2.18.4' __build__ = 0x021804 __author__ = 'Kenneth Reitz' __author_email__ = 'me@kennethreitz.org' __license__ = 'Apache 2.0' __copyright__ = 'Copyright 2017 Kenneth Reitz' __cake__ = u'\u2728 \U0001f370 \u2728' PK.e[Nrequests/exceptions.pycnu[ abc@sdZddlmZdefdYZdefdYZdefdYZd efd YZd efd YZd efdYZ dee fdYZ de fdYZ defdYZ defdYZ deefdYZdeefdYZdeefdYZdeefdYZdefd YZd!eefd"YZd#eefd$YZd%efd&YZd'efd(YZd)efd*YZd+eefd,YZd-efd.YZd/S(0s` requests.exceptions ~~~~~~~~~~~~~~~~~~~ This module contains the set of Requests' exceptions. i(t HTTPErrortRequestExceptioncBseZdZdZRS(sTThere was an ambiguous exception that occurred while handling your request. 
cOs|jdd}||_|jdd|_|dk rg|j rgt|drg|jj|_ntt|j||dS(sBInitialize RequestException with `request` and `response` objects.tresponsetrequestN(tpoptNoneRRthasattrtsuperRt__init__(tselftargstkwargsR((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRs (t__name__t __module__t__doc__R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR sRcBseZdZRS(sAn HTTP error occurred.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRstConnectionErrorcBseZdZRS(sA Connection error occurred.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR st ProxyErrorcBseZdZRS(sA proxy error occurred.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR$stSSLErrorcBseZdZRS(sAn SSL error occurred.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR(stTimeoutcBseZdZRS(sThe request timed out. Catching this error will catch both :exc:`~requests.exceptions.ConnectTimeout` and :exc:`~requests.exceptions.ReadTimeout` errors. (R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR,stConnectTimeoutcBseZdZRS(sThe request timed out while trying to connect to the remote server. Requests that produced this error are safe to retry. (R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR5st ReadTimeoutcBseZdZRS(s@The server did not send any data in the allotted amount of time.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR<st URLRequiredcBseZdZRS(s*A valid URL is required to make a request.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR@stTooManyRedirectscBseZdZRS(sToo many redirects.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRDst MissingSchemacBseZdZRS(s/The URL schema (e.g. 
http or https) is missing.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRHst InvalidSchemacBseZdZRS(s"See defaults.py for valid schemas.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRLst InvalidURLcBseZdZRS(s%The URL provided was somehow invalid.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRPst InvalidHeadercBseZdZRS(s.The header value provided was somehow invalid.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRTstChunkedEncodingErrorcBseZdZRS(s?The server declared chunked encoding but sent an invalid chunk.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRXstContentDecodingErrorcBseZdZRS(s!Failed to decode response content(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR\stStreamConsumedErrorcBseZdZRS(s2The content for this response was already consumed(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR`st RetryErrorcBseZdZRS(sCustom retries logic failed(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRdstUnrewindableBodyErrorcBseZdZRS(s:Requests encountered an error when trying to rewind a body(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRhstRequestsWarningcBseZdZRS(sBase warning for Requests.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR nstFileModeWarningcBseZdZRS(sJA file was opened in text mode, but Requests determined its binary length.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR!sstRequestsDependencyWarningcBseZdZRS(s@An imported dependency doesn't match the expected version range.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR"xsN(Rtpip._vendor.urllib3.exceptionsRt BaseHTTPErrortIOErrorRRRRRRRRRt ValueErrorRRRRRRt TypeErrorRRRtWarningR 
tDeprecationWarningR!R"(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyts. PK.e[requests/certs.pynu[#!/usr/bin/env python # -*- coding: utf-8 -*- """ requests.certs ~~~~~~~~~~~~~~ This module returns the preferred default CA certificate bundle. There is only one — the one from the certifi package. If you are packaging Requests, e.g., for a Linux distribution or a managed environment, you can change the definition of where() to return a separately packaged CA bundle. """ from pip._vendor.certifi import where if __name__ == '__main__': print(where()) PK.e[9HHrequests/_internal_utils.pynu[# -*- coding: utf-8 -*- """ requests._internal_utils ~~~~~~~~~~~~~~ Provides utility functions that are consumed internally by Requests which depend on extremely few external helpers (such as compat) """ from .compat import is_py2, builtin_str, str def to_native_string(string, encoding='ascii'): """Given a string object, regardless of type, returns a representation of that string in the native string type, encoding and decoding where necessary. This assumes ASCII unless told otherwise. """ if isinstance(string, builtin_str): out = string else: if is_py2: out = string.encode(encoding) else: out = string.decode(encoding) return out def unicode_is_ascii(u_string): """Determine if unicode string only contains ASCII characters. :param str u_string: unicode string to check. Must be unicode and not Python 2 `str`. :rtype: bool """ assert isinstance(u_string, str) try: u_string.encode('ascii') return True except UnicodeEncodeError: return False PK.e[C3 G Grequests/cookies.pynu[# -*- coding: utf-8 -*- """ requests.cookies ~~~~~~~~~~~~~~~~ Compatibility code to be able to use `cookielib.CookieJar` with requests. requests.utils imports from here, so be careful with imports. 
""" import copy import time import calendar import collections from ._internal_utils import to_native_string from .compat import cookielib, urlparse, urlunparse, Morsel try: import threading except ImportError: import dummy_threading as threading class MockRequest(object): """Wraps a `requests.Request` to mimic a `urllib2.Request`. The code in `cookielib.CookieJar` expects this interface in order to correctly manage cookie policies, i.e., determine whether a cookie can be set, given the domains of the request and the cookie. The original request object is read-only. The client is responsible for collecting the new headers via `get_new_headers()` and interpreting them appropriately. You probably want `get_cookie_header`, defined below. """ def __init__(self, request): self._r = request self._new_headers = {} self.type = urlparse(self._r.url).scheme def get_type(self): return self.type def get_host(self): return urlparse(self._r.url).netloc def get_origin_req_host(self): return self.get_host() def get_full_url(self): # Only return the response's URL if the user hadn't set the Host # header if not self._r.headers.get('Host'): return self._r.url # If they did set it, retrieve it and reconstruct the expected domain host = to_native_string(self._r.headers['Host'], encoding='utf-8') parsed = urlparse(self._r.url) # Reconstruct the URL as we expect it return urlunparse([ parsed.scheme, host, parsed.path, parsed.params, parsed.query, parsed.fragment ]) def is_unverifiable(self): return True def has_header(self, name): return name in self._r.headers or name in self._new_headers def get_header(self, name, default=None): return self._r.headers.get(name, self._new_headers.get(name, default)) def add_header(self, key, val): """cookielib has no legitimate use for this method; add it back if you find one.""" raise NotImplementedError("Cookie headers should be added with add_unredirected_header()") def add_unredirected_header(self, name, value): self._new_headers[name] = value def 
get_new_headers(self): return self._new_headers @property def unverifiable(self): return self.is_unverifiable() @property def origin_req_host(self): return self.get_origin_req_host() @property def host(self): return self.get_host() class MockResponse(object): """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. ...what? Basically, expose the parsed HTTP headers from the server response the way `cookielib` expects to see them. """ def __init__(self, headers): """Make a MockResponse for `cookielib` to read. :param headers: a httplib.HTTPMessage or analogous carrying the headers """ self._headers = headers def info(self): return self._headers def getheaders(self, name): self._headers.getheaders(name) def extract_cookies_to_jar(jar, request, response): """Extract the cookies from the response into a CookieJar. :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar) :param request: our own requests.Request object :param response: urllib3.HTTPResponse object """ if not (hasattr(response, '_original_response') and response._original_response): return # the _original_response field is the wrapped httplib.HTTPResponse object, req = MockRequest(request) # pull out the HTTPMessage with the headers and put it in the mock: res = MockResponse(response._original_response.msg) jar.extract_cookies(res, req) def get_cookie_header(jar, request): """ Produce an appropriate Cookie header string to be sent with `request`, or None. :rtype: str """ r = MockRequest(request) jar.add_cookie_header(r) return r.get_new_headers().get('Cookie') def remove_cookie_by_name(cookiejar, name, domain=None, path=None): """Unsets a cookie by name, by default over all domains and paths. Wraps CookieJar.clear(), is O(n). 
""" clearables = [] for cookie in cookiejar: if cookie.name != name: continue if domain is not None and domain != cookie.domain: continue if path is not None and path != cookie.path: continue clearables.append((cookie.domain, cookie.path, cookie.name)) for domain, path, name in clearables: cookiejar.clear(domain, path, name) class CookieConflictError(RuntimeError): """There are two cookies that meet the criteria specified in the cookie jar. Use .get and .set and include domain and path args in order to be more specific. """ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping): """Compatibility class; is a cookielib.CookieJar, but exposes a dict interface. This is the CookieJar we create by default for requests and sessions that don't specify one, since some clients may expect response.cookies and session.cookies to support dict operations. Requests does not use the dict interface internally; it's just for compatibility with external client code. All requests code should work out of the box with externally provided instances of ``CookieJar``, e.g. ``LWPCookieJar`` and ``FileCookieJar``. Unlike a regular CookieJar, this class is pickleable. .. warning:: dictionary operations that are normally O(1) may be O(n). """ def get(self, name, default=None, domain=None, path=None): """Dict-like get() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. .. warning:: operation is O(n), not O(1). """ try: return self._find_no_duplicates(name, domain, path) except KeyError: return default def set(self, name, value, **kwargs): """Dict-like set() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. 
""" # support client code that unsets cookies by assignment of a None value: if value is None: remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path')) return if isinstance(value, Morsel): c = morsel_to_cookie(value) else: c = create_cookie(name, value, **kwargs) self.set_cookie(c) return c def iterkeys(self): """Dict-like iterkeys() that returns an iterator of names of cookies from the jar. .. seealso:: itervalues() and iteritems(). """ for cookie in iter(self): yield cookie.name def keys(self): """Dict-like keys() that returns a list of names of cookies from the jar. .. seealso:: values() and items(). """ return list(self.iterkeys()) def itervalues(self): """Dict-like itervalues() that returns an iterator of values of cookies from the jar. .. seealso:: iterkeys() and iteritems(). """ for cookie in iter(self): yield cookie.value def values(self): """Dict-like values() that returns a list of values of cookies from the jar. .. seealso:: keys() and items(). """ return list(self.itervalues()) def iteritems(self): """Dict-like iteritems() that returns an iterator of name-value tuples from the jar. .. seealso:: iterkeys() and itervalues(). """ for cookie in iter(self): yield cookie.name, cookie.value def items(self): """Dict-like items() that returns a list of name-value tuples from the jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a vanilla python dict of key value pairs. .. seealso:: keys() and values(). """ return list(self.iteritems()) def list_domains(self): """Utility method to list all the domains in the jar.""" domains = [] for cookie in iter(self): if cookie.domain not in domains: domains.append(cookie.domain) return domains def list_paths(self): """Utility method to list all the paths in the jar.""" paths = [] for cookie in iter(self): if cookie.path not in paths: paths.append(cookie.path) return paths def multiple_domains(self): """Returns True if there are multiple domains in the jar. 
Returns False otherwise. :rtype: bool """ domains = [] for cookie in iter(self): if cookie.domain is not None and cookie.domain in domains: return True domains.append(cookie.domain) return False # there is only one domain in jar def get_dict(self, domain=None, path=None): """Takes as an argument an optional domain and path and returns a plain old Python dict of name-value pairs of cookies that meet the requirements. :rtype: dict """ dictionary = {} for cookie in iter(self): if ( (domain is None or cookie.domain == domain) and (path is None or cookie.path == path) ): dictionary[cookie.name] = cookie.value return dictionary def __contains__(self, name): try: return super(RequestsCookieJar, self).__contains__(name) except CookieConflictError: return True def __getitem__(self, name): """Dict-like __getitem__() for compatibility with client code. Throws exception if there are more than one cookie with name. In that case, use the more explicit get() method instead. .. warning:: operation is O(n), not O(1). """ return self._find_no_duplicates(name) def __setitem__(self, name, value): """Dict-like __setitem__ for compatibility with client code. Throws exception if there is already a cookie of that name in the jar. In that case, use the more explicit set() method instead. """ self.set(name, value) def __delitem__(self, name): """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s ``remove_cookie_by_name()``. 
""" remove_cookie_by_name(self, name) def set_cookie(self, cookie, *args, **kwargs): if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'): cookie.value = cookie.value.replace('\\"', '') return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs) def update(self, other): """Updates this jar with cookies from another CookieJar or dict-like""" if isinstance(other, cookielib.CookieJar): for cookie in other: self.set_cookie(copy.copy(cookie)) else: super(RequestsCookieJar, self).update(other) def _find(self, name, domain=None, path=None): """Requests uses this method internally to get cookie values. If there are conflicting cookies, _find arbitrarily chooses one. See _find_no_duplicates if you want an exception thrown if there are conflicting cookies. :param name: a string containing name of cookie :param domain: (optional) string containing domain of cookie :param path: (optional) string containing path of cookie :return: cookie.value """ for cookie in iter(self): if cookie.name == name: if domain is None or cookie.domain == domain: if path is None or cookie.path == path: return cookie.value raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) def _find_no_duplicates(self, name, domain=None, path=None): """Both ``__get_item__`` and ``get`` call this function: it's never used elsewhere in Requests. 
:param name: a string containing name of cookie :param domain: (optional) string containing domain of cookie :param path: (optional) string containing path of cookie :raises KeyError: if cookie is not found :raises CookieConflictError: if there are multiple cookies that match name and optionally domain and path :return: cookie.value """ toReturn = None for cookie in iter(self): if cookie.name == name: if domain is None or cookie.domain == domain: if path is None or cookie.path == path: if toReturn is not None: # if there are multiple cookies that meet passed in criteria raise CookieConflictError('There are multiple cookies with name, %r' % (name)) toReturn = cookie.value # we will eventually return this as long as no cookie conflict if toReturn: return toReturn raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) def __getstate__(self): """Unlike a normal CookieJar, this class is pickleable.""" state = self.__dict__.copy() # remove the unpickleable RLock object state.pop('_cookies_lock') return state def __setstate__(self, state): """Unlike a normal CookieJar, this class is pickleable.""" self.__dict__.update(state) if '_cookies_lock' not in self.__dict__: self._cookies_lock = threading.RLock() def copy(self): """Return a copy of this RequestsCookieJar.""" new_cj = RequestsCookieJar() new_cj.update(self) return new_cj def _copy_cookie_jar(jar): if jar is None: return None if hasattr(jar, 'copy'): # We're dealing with an instance of RequestsCookieJar return jar.copy() # We're dealing with a generic CookieJar instance new_jar = copy.copy(jar) new_jar.clear() for cookie in jar: new_jar.set_cookie(copy.copy(cookie)) return new_jar def create_cookie(name, value, **kwargs): """Make a cookie from underspecified parameters. By default, the pair of `name` and `value` will be set for the domain '' and sent on every request (this is sometimes called a "supercookie"). 
""" result = dict( version=0, name=name, value=value, port=None, domain='', path='/', secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False,) badargs = set(kwargs) - set(result) if badargs: err = 'create_cookie() got unexpected keyword arguments: %s' raise TypeError(err % list(badargs)) result.update(kwargs) result['port_specified'] = bool(result['port']) result['domain_specified'] = bool(result['domain']) result['domain_initial_dot'] = result['domain'].startswith('.') result['path_specified'] = bool(result['path']) return cookielib.Cookie(**result) def morsel_to_cookie(morsel): """Convert a Morsel object into a Cookie containing the one k/v pair.""" expires = None if morsel['max-age']: try: expires = int(time.time() + int(morsel['max-age'])) except ValueError: raise TypeError('max-age: %s must be integer' % morsel['max-age']) elif morsel['expires']: time_template = '%a, %d-%b-%Y %H:%M:%S GMT' expires = calendar.timegm( time.strptime(morsel['expires'], time_template) ) return create_cookie( comment=morsel['comment'], comment_url=bool(morsel['comment']), discard=False, domain=morsel['domain'], expires=expires, name=morsel.key, path=morsel['path'], port=None, rest={'HttpOnly': morsel['httponly']}, rfc2109=False, secure=bool(morsel['secure']), value=morsel.value, version=morsel['version'] or 0, ) def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): """Returns a CookieJar from a key/value dictionary. :param cookie_dict: Dict of key/values to insert into CookieJar. :param cookiejar: (optional) A cookiejar to add the cookies to. :param overwrite: (optional) If False, will not replace cookies already in the jar with new ones. 
""" if cookiejar is None: cookiejar = RequestsCookieJar() if cookie_dict is not None: names_from_jar = [cookie.name for cookie in cookiejar] for name in cookie_dict: if overwrite or (name not in names_from_jar): cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) return cookiejar def merge_cookies(cookiejar, cookies): """Add cookies to cookiejar and returns a merged CookieJar. :param cookiejar: CookieJar object to add the cookies to. :param cookies: Dictionary or CookieJar object to be added. """ if not isinstance(cookiejar, cookielib.CookieJar): raise ValueError('You can only merge into CookieJar') if isinstance(cookies, dict): cookiejar = cookiejar_from_dict( cookies, cookiejar=cookiejar, overwrite=False) elif isinstance(cookies, cookielib.CookieJar): try: cookiejar.update(cookies) except AttributeError: for cookie_in_jar in cookies: cookiejar.set_cookie(cookie_in_jar) return cookiejar PK.e[!requests/status_codes.pyonu[ abc@skddlmZiDdd6dd6dd6dd 6dd 6dd6dd6dd6dd6dd6dd 6dd#6dd(6dd*6dd,6dd.6dd26dd46dd76dd96dd;6dd=6ddA6ddE6ddH6ddJ6ddM6ddO6ddR6ddU6ddW6dd[6dd^6dd`6ddb6ddd6ddg6ddi6ddk6ddo6dds6ddu6ddy6dd{6dd~6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6ZeddZxcejD]U\ZZxFeD]>Zeeeeej ds!eeej eq!q!WqWdS(i(t LookupDicttcontinueidtswitching_protocolsiet processingift checkpointigt uri_too_longtrequest_uri_too_longiztoktokaytall_oktall_okaytall_goods\o/s✓itcreateditaccepteditnon_authoritative_infotnon_authoritative_informationit no_contentit reset_contenttresetitpartial_contenttpartialit multi_statustmultiple_statust multi_statitmultiple_statiitalready_reporteditim_useditmultiple_choicesi,tmoved_permanentlytmoveds\o-i-tfoundi.t see_othertotheri/t not_modifiedi0t use_proxyi1t switch_proxyi2ttemporary_redirectttemporary_movedt temporaryi3tpermanent_redirecttresume_incompletetresumei4t bad_requesttbadit unauthorizeditpayment_requiredtpaymentit forbiddenit not_founds-o-itmethod_not_allowedt 
not_alloweditnot_acceptableitproxy_authentication_requiredt proxy_authtproxy_authenticationitrequest_timeoutttimeoutitconflictitgoneitlength_requireditprecondition_failedt preconditionitrequest_entity_too_largeitrequest_uri_too_largeitunsupported_media_typetunsupported_mediat media_typeitrequested_range_not_satisfiabletrequested_rangetrange_not_satisfiableitexpectation_failedit im_a_teapottteapott i_am_a_teapotitmisdirected_requestitunprocessable_entityt unprocessableitlockeditfailed_dependencyt dependencyitunordered_collectiont unordereditupgrade_requiredtupgradeitprecondition_requiredittoo_many_requeststtoo_manyitheader_fields_too_largetfields_too_largeit no_responsetnoneit retry_withtretryit$blocked_by_windows_parental_controlstparental_controlsitunavailable_for_legal_reasonst legal_reasonsitclient_closed_requestitinternal_server_errort server_errors/o\s✗itnot_implementedit bad_gatewayitservice_unavailablet unavailableitgateway_timeoutithttp_version_not_supportedt http_versionitvariant_also_negotiatesitinsufficient_storageitbandwidth_limit_exceededt bandwidthit not_extendeditnetwork_authentication_requiredt network_authtnetwork_authenticationitnamet status_codess\t/N(R(R(R(R(RR(RRR R R s\o/s✓(R (R (RR(R(RR(RR(RRRR(R(R(R(RRs\o-(R(RR (R!(R"(R#(R$R%R&(R'R(R)(R*R+(R,(R-R.(R/(R0s-o-(R1R2(R3(R4R5R6(R7R8(R9(R:(R;(R<R=(R>(R?(R@RARB(RCRDRE(RF(RGRHRI(RJ(RKRL(RM(RNRO(RPRQ(RRRS(RTR=(RURV(RWRX(RYRZ(R[R\(R]R^(R_R`(Ra(RbRcs/o\s✗(Rd(Re(RfRg(Rh(RiRj(Rk(Rl(RmRn(Ro(RpRqRr(s\Ru( t structuresRt_codestcodestitemstcodettitlesttitletsetattrt startswithtupper(((sE/usr/lib/python2.7/site-packages/pip/_vendor/requests/status_codes.pyts  PK.e[]< C C requests/help.pyonu[ abc@s dZddlmZddlZddlZddlZddlZddlmZddlm Z ddlm Z ddl m Z ydd lmZWn#ek rdZdZdZnXddlZddlZd Zd Zd Zed kr endS(s'Module containing bug report helper(s).i(tprint_functionN(tidna(turllib3(tchardeti(t __version__(t pyopensslcCstj}|dkr'tj}n|dkrdtjjtjjtjjf}tjjdkrdj |tjjg}qn<|dkrtj}n!|dkrtj}nd}i|d 6|d 6S( sReturn a dict with the 
Python implementation and version. Provide both the name and the version of the Python implementation currently running. For example, on CPython 2.7.5 it will return {'name': 'CPython', 'version': '2.7.5'}. This function works best on CPython and PyPy: in particular, it probably doesn't work for Jython or IronPython. Future investigation should be done to work out the correct shape of the code for those platforms. tCPythontPyPys%s.%s.%stfinalttJythont IronPythontUnknowntnametversion( tplatformtpython_implementationtpython_versiontsystpypy_version_infotmajortminortmicrot releaseleveltjoin(timplementationtimplementation_version((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/help.pyt_implementations       c Csqy$itjd6tjd6}Wn%tk rKidd6dd6}nXt}itjd6}itjd6}idd6dd6}t rit jd6dt j j d6}nit t ddd6}it tddd6}t td d}i|dk rd|ndd6}i |d 6|d 6|d 6tdk d 6|d6|d6|d6|d6|d6itd6d6S(s&Generate information for a bug report.tsystemtreleaseR RR topenssl_versions%xRtOPENSSL_VERSION_NUMBERRRt system_ssltusing_pyopensslt pyOpenSSLRRt cryptographyRtrequestsN(RRRtIOErrorRRRRtNonetOpenSSLtSSLRtgetattrR#RtsslRtrequests_version( t platform_infotimplementation_infot urllib3_infot chardet_infotpyopenssl_infotcryptography_infot idna_infoR tsystem_ssl_info((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/help.pytinfo;sJ       cCs&ttjtdtdddS(s)Pretty-print the bug information as JSON.t sort_keystindentiN(tprinttjsontdumpsR4tTrue(((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/help.pytmainrst__main__(t__doc__t __future__RR8RRR*t pip._vendorRRRR RR+tpackages.urllib3.contribRt ImportErrorR&R'R#RR4R;t__name__(((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/help.pyts,         ! 7  PK.e[\++requests/structures.pycnu[ abc@sUdZddlZddlmZdejfdYZdefdYZdS( sO requests.structures ~~~~~~~~~~~~~~~~~~~ Data structures that power Requests. iNi(t OrderedDicttCaseInsensitiveDictcBskeZdZd dZdZdZdZdZdZ dZ dZ d Z d Z RS( sA case-insensitive ``dict``-like object. 
Implements all methods and operations of ``collections.MutableMapping`` as well as dict's ``copy``. Also provides ``lower_items``. All keys are expected to be strings. The structure remembers the case of the last key to be set, and ``iter(instance)``, ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` will contain case-sensitive keys. However, querying and contains testing is case insensitive:: cid = CaseInsensitiveDict() cid['Accept'] = 'application/json' cid['aCCEPT'] == 'application/json' # True list(cid) == ['Accept'] # True For example, ``headers['content-encoding']`` will return the value of a ``'Content-Encoding'`` response header, regardless of how the header name was originally stored. If the constructor, ``.update``, or equality comparison operations are given keys that have equal ``.lower()``s, the behavior is undefined. cKs5t|_|dkr!i}n|j||dS(N(Rt_storetNonetupdate(tselftdatatkwargs((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyt__init__*s   cCs||f|j|j<s(Rtvalues(R((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyt__iter__;scCs t|jS(N(tlenR(R((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyt__len__>scCsd|jjDS(s.Like iteritems(), but with all lowercase keys.css%|]\}}||dfVqdS(iN((Rtlowerkeytkeyval((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pys Ds(Rtitems(R((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyt lower_itemsAscCsGt|tjr!t|}ntSt|jt|jkS(N(t isinstancet collectionstMappingRtNotImplementedtdictR(Rtother((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyt__eq__IscCst|jjS(N(RRR(R((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pytcopyRscCstt|jS(N(tstrRR(R((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyt__repr__UsN(t__name__t __module__t__doc__RRR R RRRRR R!R#(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyRs        t 
LookupDictcBs8eZdZddZdZdZddZRS(sDictionary lookup object.cCs ||_tt|jdS(N(tnametsuperR'R(RR(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyR\s cCs d|jS(Ns (R((R((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyR#`scCs|jj|dS(N(t__dict__tgetR(RR ((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyR cscCs|jj||S(N(R*R+(RR tdefault((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyR+hsN(R$R%R&RRR#R R+(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyR'Ys    (R&RtcompatRtMutableMappingRRR'(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyts JPK.e[jFKKrequests/__version__.pycnu[ abc@s@dZdZdZdZdZdZdZdZdZd Z d S( trequestssPython HTTP for Humans.shttp://python-requests.orgs2.18.4is Kenneth Reitzsme@kennethreitz.orgs Apache 2.0sCopyright 2017 Kenneth Reitzu ✨ 🍰 ✨N( t __title__t__description__t__url__t __version__t __build__t __author__t__author_email__t __license__t __copyright__t__cake__(((sD/usr/lib/python2.7/site-packages/pip/_vendor/requests/__version__.pytsPK.e[>trequests/__init__.pycnu[ abc@stdZddlmZddlmZddlZddlmZdZyeejejWn9e e fk rej dj ejejenXdd l mZejd edd lmZmZmZmZdd lmZmZmZmZdd lmZmZddlmZddlmZddlmZmZmZddl m!Z!m"Z"m#Z#m$Z$m%Z%m&Z&m'Z'm(Z(ddl)m*Z*m+Z+ddl,m-Z-ddlm.Z.m/Z/m0Z0m1Z1m2Z2m3Z3m4Z4m5Z5m6Z6ddl7Z7yddl7m8Z8Wn*e9k r@de7j:fdYZ8nXe7j;e<j=e8ejde4de>dS(s Requests HTTP Library ~~~~~~~~~~~~~~~~~~~~~ Requests is an HTTP library, written in Python, for human beings. Basic GET usage: >>> import requests >>> r = requests.get('https://www.python.org') >>> r.status_code 200 >>> 'Python is a programming language' in r.content True ... or POST: >>> payload = dict(key1='value1', key2='value2') >>> r = requests.post('http://httpbin.org/post', data=payload) >>> print(r.text) { ... "form": { "key2": "value2", "key1": "value1" }, ... } The other HTTP methods are supported - see `requests.api`. Full documentation is at . 
:copyright: (c) 2017 by Kenneth Reitz. :license: Apache 2.0, see LICENSE for more details. i(turllib3(tchardetNi(tRequestsDependencyWarningcCs-|jd}|dgks$tt|dkrF|jdn|\}}}t|t|t|}}}|dkst|dkst|dkst|jdd \}}}t|t|t|}}}|dkst|dkst|dks)tdS( Nt.tdevit0iiii(tsplittAssertionErrortlentappendtint(turllib3_versiontchardet_versiontmajortminortpatch((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/__init__.pytcheck_compatibility1s&&sAurllib3 ({0}) or chardet ({1}) doesn't match a supported version!(tDependencyWarningtignore(t __title__t__description__t__url__t __version__(t __build__t __author__t__author_email__t __license__(t __copyright__t__cake__(tutils(tpackages(tRequesttResponsetPreparedRequest(trequesttgettheadtpostRtputtdeletetoptions(tsessiontSession(tcodes( tRequestExceptiontTimeoutt URLRequiredtTooManyRedirectst HTTPErrortConnectionErrortFileModeWarningtConnectTimeoutt ReadTimeout(t NullHandlerR5cBseZdZRS(cCsdS(N((tselftrecord((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/__init__.pytemitss(t__name__t __module__R8(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/__init__.pyR5rstdefaultR (?t__doc__t pip._vendorRRtwarningst exceptionsRRRRt ValueErrortwarntformattpip._vendor.urllib3.exceptionsRt simplefilterRRRRRRRRRtRRtmodelsRR R!tapiR"R#R$R%RR&R'R(tsessionsR)R*t status_codesR+R,R-R.R/R0R1R2R3R4tloggingR5t ImportErrortHandlert getLoggerR9t addHandlertTrue(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/__init__.pyt)s<    "":@  PK.e[2ۄWWrequests/cookies.pycnu[ abc@sQdZddlZddlZddlZddlZddlmZddlmZm Z m Z m Z yddl Z Wne k rddlZ nXdefdYZdefd YZd Zd Zddd Zd efdYZdejejfdYZdZdZdZdedZdZ dS(s requests.cookies ~~~~~~~~~~~~~~~~ Compatibility code to be able to use `cookielib.CookieJar` with requests. requests.utils imports from here, so be careful with imports. iNi(tto_native_string(t cookielibturlparset urlunparsetMorselt MockRequestcBseZdZdZdZdZdZdZdZdZ ddZ d Z d Z d Zed Zed ZedZRS(sWraps a `requests.Request` to mimic a `urllib2.Request`. 
The code in `cookielib.CookieJar` expects this interface in order to correctly manage cookie policies, i.e., determine whether a cookie can be set, given the domains of the request and the cookie. The original request object is read-only. The client is responsible for collecting the new headers via `get_new_headers()` and interpreting them appropriately. You probably want `get_cookie_header`, defined below. cCs.||_i|_t|jjj|_dS(N(t_rt _new_headersRturltschemettype(tselftrequest((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt__init__&s  cCs|jS(N(R (R ((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pytget_type+scCst|jjjS(N(RRRtnetloc(R ((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pytget_host.scCs |jS(N(R(R ((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pytget_origin_req_host1scCsx|jjjds|jjSt|jjddd}t|jj}t|j||j|j |j |j gS(NtHosttencodingsutf-8( RtheaderstgetRRRRR tpathtparamstquerytfragment(R thosttparsed((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt get_full_url4s cCstS(N(tTrue(R ((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pytis_unverifiableBscCs||jjkp||jkS(N(RRR(R tname((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt has_headerEscCs%|jjj||jj||S(N(RRRR(R Rtdefault((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt get_headerHscCstddS(sMcookielib has no legitimate use for this method; add it back if you find one.s=Cookie headers should be added with add_unredirected_header()N(tNotImplementedError(R tkeytval((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt add_headerKscCs||j|(RR'RQtresulttbadargsterr((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyROs0   cCs!d}|dr_y$ttjt|d}Wqtk r[td|dqXn2|drd}tjtj|d|}ntd|ddt |ddt d|dd|d |j d |d d dd i|d d6dt dt |dd|j d|dpd S(sBConvert a Morsel object into a Cookie containing the one k/v pair.smax-agesmax-age: %s must be integerRs%a, 
%d-%b-%Y %H:%M:%S GMTRRRRBRRRRthttponlyRRRR'RiN( R/tintttimet ValueErrorRtcalendarttimegmtstrptimeRORR`R$R'(tmorselRt time_template((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyRNs0 $       cCs|dkrt}n|dk rg|D]}|j^q+}x@|D]5}|s_||krG|jt|||qGqGWn|S(s-Returns a CookieJar from a key/value dictionary. :param cookie_dict: Dict of key/values to insert into CookieJar. :param cookiejar: (optional) A cookiejar to add the cookies to. :param overwrite: (optional) If False, will not replace cookies already in the jar with new ones. N(R/RJRRPRO(t cookie_dictREt overwriteRGtnames_from_jarR((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pytcookiejar_from_dicts    $cCst|tjs!tdnt|trKt|d|dt}nXt|tjry|j|Wqtk rx|D]}|j |qWqXn|S(sAdd cookies to cookiejar and returns a merged CookieJar. :param cookiejar: CookieJar object to add the cookies to. :param cookies: Dictionary or CookieJar object to be added. s!You can only merge into CookieJarRER( RMRRoRRRR`RqtAttributeErrorRP(REtcookiest cookie_in_jar((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt merge_cookies s  (!R.RpRRt collectionst_internal_utilsRtcompatRRRRRzt ImportErrortdummy_threadingtobjectRR1R=RAR/RHt RuntimeErrorRIRotMutableMappingRJRRORNRRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt s,    " H    # PK.e[Grequests/_internal_utils.pyonu[ abc@s;dZddlmZmZmZddZdZdS(s requests._internal_utils ~~~~~~~~~~~~~~ Provides utility functions that are consumed internally by Requests which depend on extremely few external helpers (such as compat) i(tis_py2t builtin_strtstrtasciicCsCt|tr|}n'tr0|j|}n|j|}|S(sGiven a string object, regardless of type, returns a representation of that string in the native string type, encoding and decoding where necessary. This assumes ASCII unless told otherwise. 
(t isinstanceRRtencodetdecode(tstringtencodingtout((sH/usr/lib/python2.7/site-packages/pip/_vendor/requests/_internal_utils.pytto_native_strings  cCs.y|jdtSWntk r)tSXdS(sDetermine if unicode string only contains ASCII characters. :param str u_string: unicode string to check. Must be unicode and not Python 2 `str`. :rtype: bool RN(RtTruetUnicodeEncodeErrortFalse(tu_string((sH/usr/lib/python2.7/site-packages/pip/_vendor/requests/_internal_utils.pytunicode_is_asciis   N(t__doc__tcompatRRRR R(((sH/usr/lib/python2.7/site-packages/pip/_vendor/requests/_internal_utils.pyt s PK.e[wrrrequests/models.pyonu[ abc@sdZddlZddlZddlZddlZddlmZddlm Z ddl m Z ddl m Z mZmZmZddlmZdd lmZdd lmZdd lmZdd lmZmZmZdd lmZmZm Z m!Z!m"Z"m#Z#m$Z$ddl%m&Z&m'Z'ddl(m)Z)m*Z*m+Z+m,Z,m-Z-m.Z.m/Z/m0Z0m1Z1m2Z2ddl3m4Z4m5Z5m6Z6m7Z7m8Z8m9Z9m:Z:m;Z;m<Z<m=Z=ddl3m>Z?ddl@mAZAeAjBeAjCeAjDeAjEeAjFfZGdZHddZIdZJdeKfdYZLdeKfdYZMdeMfdYZNdeLeMfdYZOdeKfd YZPdS(!s` requests.models ~~~~~~~~~~~~~~~ This module contains the primary objects that power Requests. 
iN(t RequestField(tencode_multipart_formdata(t parse_url(t DecodeErrortReadTimeoutErrort ProtocolErrortLocationParseError(tUnsupportedOperationi(t default_hooks(tCaseInsensitiveDict(t HTTPBasicAuth(tcookiejar_from_dicttget_cookie_headert_copy_cookie_jar(t HTTPErrort MissingSchemat InvalidURLtChunkedEncodingErrortContentDecodingErrortConnectionErrortStreamConsumedError(tto_native_stringtunicode_is_ascii( tguess_filenametget_auth_from_urlt requote_uritstream_decode_response_unicodetto_key_val_listtparse_header_linkst iter_slicestguess_json_utft super_lentcheck_header_validity( t cookielibt urlunparseturlsplitt urlencodetstrtbytestis_py2tchardett builtin_strt basestring(tjson(tcodesii iitRequestEncodingMixincBs5eZedZedZedZRS(cCssg}t|j}|j}|s-d}n|j||j}|rf|jd|j|ndj|S(sBuild the path URL to use.t/t?t(R#turltpathtappendtquerytjoin(tselfR1tpR2R4((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytpath_url=s     cCst|ttfr|St|dr,|St|drg}xt|D]\}}t|tsyt|d r|g}nxl|D]d}|dk r|jt|tr|jdn|t|tr|jdn|fqqWqNWt |dt S|SdS(sEncode parameters in a piece of data. Will successfully encode parameters when passed as a dict or a list of 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary if parameters are supplied as a dict. treadt__iter__sutf-8tdoseqN( t isinstanceR%R&thasattrRR*tNoneR3tencodeR$tTrue(tdatatresulttktvstv((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt_encode_paramsRs    !3c Cs]|stdnt|tr3tdng}t|pEi}t|pWi}x|D]\}}t|tst|d r|g}nx|D]}|d k rt|tst|}n|jt|tr|j dn|t|tr|j dn|fqqWqdWx|D] \}}d }d } t|t t frt |dkr|\} } qt |dkr|\} } }q|\} } }} nt|p|} |} t| tttfr| } n | j} td|d| d | d | } | jd ||j| q3Wt|\}}||fS( sBuild the body for a multipart/form-data request. Will successfully encode files when passed as a dict or a list of tuples. Order is retained if data is a list of tuples but arbitrary if parameters are supplied as a dict. 
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) or 4-tuples (filename, fileobj, contentype, custom_headers). sFiles must be provided.sData must not be a string.R:sutf-8iitnameRAtfilenametheaderst content_typeN(t ValueErrorR<R*RR=R>R&R%R3tdecodeR?ttupletlisttlenRt bytearrayR9Rtmake_multipartR(tfilesRAt new_fieldstfieldstfieldtvalRERCtfttfhtfntfptfdatatrftbodyRJ((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt _encode_filesmsH    !3  !(t__name__t __module__tpropertyR8t staticmethodRFR^(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR-<stRequestHooksMixincBseZdZdZRS(cCs||jkr"td|nt|tjrK|j|j|n0t|dr{|j|jd|DndS(sProperly register a hook.s1Unsupported event specified, with event name "%s"R:css'|]}t|tjr|VqdS(N(R<t collectionstCallable(t.0th((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pys sN(thooksRKR<RdReR3R=textend(R6teventthook((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt register_hooks cCs5y|j|j|tSWntk r0tSXdS(siDeregister a previously registered hook. Returns True if the hook existed, False if not. N(RhtremoveR@RKtFalse(R6RjRk((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytderegister_hooks  (R_R`RlRo(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRcs tRequestc BsGeZdZddddddddddd ZdZdZRS(sA user-created :class:`Request ` object. Used to prepare a :class:`PreparedRequest `, which is sent to the server. :param method: HTTP method to use. :param url: URL to send. :param headers: dictionary of headers to send. :param files: dictionary of {filename: fileobject} files to multipart upload. :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place. :param json: json for the body to attach to the request (if files or data is not specified). :param params: dictionary of URL parameters to append to the URL. :param auth: Auth handler or (user, pass) tuple. 
:param cookies: dictionary or CookieJar of cookies to attach to this request. :param hooks: dictionary of callback hooks, for internal usage. Usage:: >>> import requests >>> req = requests.Request('GET', 'http://httpbin.org/get') >>> req.prepare() c Cs|dkrgn|}|dkr*gn|}|dkrBin|}|dkrZin|}| dkrrin| } t|_x6t| jD]"\} } |jd| d| qW||_||_||_||_ ||_ | |_ ||_ ||_ ||_dS(NRjRk(R>RRhRNtitemsRltmethodR1RIRRRAR+tparamstauthtcookies( R6RrR1RIRRRARsRtRuRhR+RCRE((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt__init__s"         cCs d|jS(Ns(Rr(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt__repr__scCsqt}|jd|jd|jd|jd|jd|jd|jd|jd|j d |j d |j |S( sXConstructs a :class:`PreparedRequest ` for transmission and returns it.RrR1RIRRRAR+RsRtRuRh( tPreparedRequesttprepareRrR1RIRRRAR+RsRtRuRh(R6R7((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRys            N(R_R`t__doc__R>RvRwRy(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRps  Rxc BseZdZdZddddddddddd ZdZdZdZe dZ dZ dZ dd Z d Zd d Zd ZdZRS(sThe fully mutable :class:`PreparedRequest ` object, containing the exact bytes that will be sent to the server. Generated from either a :class:`Request ` object or manually. 
Usage:: >>> import requests >>> req = requests.Request('GET', 'http://httpbin.org/get') >>> r = req.prepare() >>> s = requests.Session() >>> s.send(r) cCsFd|_d|_d|_d|_d|_t|_d|_dS(N( R>RrR1RIt_cookiesR]RRht_body_position(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRvs      c Csk|j||j|||j||j||j||| |j|||j| dS(s6Prepares the entire request with the given parameters.N(tprepare_methodt prepare_urltprepare_headerstprepare_cookiest prepare_bodyt prepare_autht prepare_hooks( R6RrR1RIRRRARsRtRuRhR+((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRy+s   cCs d|jS(Ns(Rr(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRw=scCst}|j|_|j|_|jdk r?|jjnd|_t|j|_|j|_|j |_ |j |_ |S(N( RxRrR1RIR>tcopyR R{R]RhR|(R6R7((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR@s   '   cCs7||_|jdk r3t|jj|_ndS(sPrepares the given HTTP method.N(RrR>Rtupper(R6Rr((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR}Ks cCsOddl}y"|j|dtjd}Wn|jk rJtnX|S(Nituts46sutf-8(tidnaR?R@RLt IDNAErrort UnicodeError(thostR((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt_get_idna_encoded_hostQs  " cCst|tr!|jd}ntr3t|n t|}|j}d|krz|jjd rz||_ dSy%t |\}}}}}}} Wn"t k r} t | j nX|sd} | jt|d} t| n|st d|nt|sRy|j|}Wqptk rNt dqpXn|jdrpt dn|pyd } | r| d 7} n| |7} |r| dt|7} n|sd }ntrst|tr|jd }nt| tr | jd } nt|tr.|jd }nt|trO|jd }nt| trs| jd } qsnt|ttfrt|}n|j|} | r|rd || f}q| }ntt|| |d|| g}||_ dS(sPrepares the given HTTP URL.tutf8t:thttpNsDInvalid URL {0!r}: No schema supplied. 
Perhaps you meant http://{0}?s Invalid URL %r: No host suppliedsURL has an invalid label.u*R0t@R.sutf-8s%s&%s(R<R&RLR'tunicodeR%tlstriptlowert startswithR1RRRtargstformatRRRRRR?RFRR"R>(R6R1RstschemeRtRtportR2R4tfragmentteterrortnetloct enc_params((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR~[sh " %       $cCsYt|_|rUx@|jD]/}t||\}}||jt|t complexjsontdumpsR<R&R?tallR=R*RNRMRdtMappingRt TypeErrortAttributeErrorRtgetattrRR|tIOErrortOSErrortobjecttNotImplementedErrorR)RIR^RFtprepare_content_lengthR](R6RARRR+R]RJt is_streamtlength((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRsJ %    cCsr|dk r7t|}|rnt||jdPrepare Content-Length header based on request method and bodysContent-LengthtGETtHEADt0N(RR(R>RR)RIRrtget(R6R]R((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRs   'R0cCs|dkr6t|j}t|r-|nd}n|rt|trlt|dkrlt|}n||}|jj |j|j |j ndS(s"Prepares the given HTTP auth data.iN( R>RR1tanyR<RMROR t__dict__tupdateRR](R6RtR1turl_authtr((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRs ! cCs_t|tjr||_nt||_t|j|}|dk r[||jd` object. Any subsequent calls to ``prepare_cookies`` will have no actual effect, unless the "Cookie" header is removed beforehand. tCookieN(R<R!t CookieJarR{R R R>RI(R6Rut cookie_header((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR$s   cCs5|p g}x"|D]}|j|||qWdS(sPrepares the given hooks.N(Rl(R6RhRj((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR8s  N(R_R`RzRvR>RyRwRR}RbRR~RRRRRR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRxs    V E  tResponsec Bs7eZdZddddddddd d g Zd Zd Zd ZdZdZdZ dZ dZ dZ e dZe dZe dZe dZe dZdedZed"d"dZe dZe dZdZe dZd Zd!ZRS(#shThe :class:`Response ` object, which contains a server's response to an HTTP request. 
t_contentt status_codeRIR1thistorytencodingtreasonRutelapsedtrequestcCst|_t|_d|_d|_t|_d|_d|_ d|_ g|_ d|_ t i|_tjd|_d|_dS(Ni(RnRt_content_consumedR>t_nextRR RItrawR1RRRR Rutdatetimet timedeltaRR(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRvLs          cCs|S(N((R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt __enter__{scGs|jdS(N(tclose(R6R((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt__exit__~scs0jsjntfdjDS(Nc3s'|]}|t|dfVqdS(N(RR>(Rftattr(R6(s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pys s(Rtcontenttdictt __attrs__(R6((R6s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt __getstate__s    cCsQx*|jD]\}}t|||q Wt|dtt|dddS(NRR(RqtsetattrR@R>(R6tstateRGR((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt __setstate__scCs d|jS(Ns(R(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRwscCs|jS(skReturns True if :attr:`status_code` is less than 400. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code, is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. (tok(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt__bool__scCs|jS(skReturns True if :attr:`status_code` is less than 400. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code, is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. (R(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt __nonzero__scCs |jdS(s,Allows you to use a response as an iterator.i(t iter_content(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR:scCs'y|jWntk r"tSXtS(skReturns True if :attr:`status_code` is less than 400. 
This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code, is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. (traise_for_statusRRnR@(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRs  cCsd|jko|jtkS(sTrue if this Response is a well-formed HTTP redirect that could have been processed automatically (by :meth:`Session.resolve_redirects`). tlocation(RIRtREDIRECT_STATI(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt is_redirectscCs(d|jko'|jtjtjfkS(s@True if this Response one of the permanent versions of redirect.R(RIRR,tmoved_permanentlytpermanent_redirect(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytis_permanent_redirectscCs|jS(sTReturns a PreparedRequest for the next request in a redirect chain, if there is one.(R(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytnextscCstj|jdS(s7The apparent encoding, provided by the chardet library.R(R(tdetectR(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytapparent_encodingsicsfd}jr9tjtr9tn5dk rntt rntdtnt j}|}jr|n|}|rt |}n|S(sIterates over the response data. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. The chunk size is the number of bytes it should read into memory. This is not necessarily the length of each item returned as decoding can take place. chunk_size must be of type int or None. A value of None will function differently depending on the value of `stream`. stream=True will read data as it arrives in whatever size the chunks are received. If stream=False, data is returned as a single chunk. If decode_unicode is True, content will be decoded using the best available encoding based on the response. 
c3stjdry,x%jjdtD] }|Vq.WWqtk r_}t|qtk r}}t|qtk r}t |qXn.x+trjj }|sPn|VqWt_ dS(Ntstreamtdecode_content( R=RRR@RRRRRRR9R(tchunkR(t chunk_sizeR6(s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytgenerates    s.chunk_size must be an int, it is instead a %s.N( RR<RtboolRR>tintRttypeRR(R6Rtdecode_unicodeRt reused_chunkst stream_chunkstchunks((RR6s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRs  ccsd}x|jd|d|D]}|dk r>||}n|rV|j|}n |j}|r|dr|r|dd|dkr|j}nd}x|D] }|VqWqW|dk r|VndS(sIterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. .. note:: This method is not reentrant safe. RRiN(R>Rtsplitt splitlinestpop(R6RRt delimitertpendingRtlinestline((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt iter_lines s   .   cCs|jtkr{|jr'tdn|jdksE|jdkrQd|_q{tj|j t prt|_nt |_|jS(s"Content of the response, in bytes.s2The content for this response was already consumediN( RRnRt RuntimeErrorRRR>R&R5RtCONTENT_CHUNK_SIZER@(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR*s   * cCsd}|j}|js"tdS|jdkr=|j}nyt|j|dd}Wn,ttfk rt|jdd}nX|S(sContent of the response, in unicode. If Response.encoding is None, encoding will be guessed using ``chardet``. The encoding of the response content is determined based solely on HTTP headers, following RFC 2616 to the letter. If you can take advantage of non-HTTP knowledge to make a better guess at the encoding, you should set ``r.encoding`` appropriately before accessing this property. R0terrorstreplaceN(R>RRR%Rt LookupErrorR(R6RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyttext>s    cKs|j r}|jr}t|jdkr}t|j}|dk r}y tj|jj||SWqztk rvqzXq}ntj|j |S(sReturns the json-encoded content of a response, if any. :param \*\*kwargs: Optional arguments that ``json.loads`` takes. :raises ValueError: If the response body does not contain valid json. 
iN( RRRORR>RtloadsRLtUnicodeDecodeErrorR(R6tkwargsR((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR+ds(   cCsj|jjd}i}|rft|}x9|D].}|jdpR|jd}|||(R6R((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRs   N(R_R`RzRRvRRRRRwRRR:RaRRRRRRnRtITER_CHUNK_SIZER>RRRR+RRR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRBs2 /     7&  (QRzRdRtsystencodings.idnat encodingstpip._vendor.urllib3.fieldsRtpip._vendor.urllib3.filepostRtpip._vendor.urllib3.utilRtpip._vendor.urllib3.exceptionsRRRRtioRRhRt structuresR RtR RuR R R t exceptionsRRRRRRRt_internal_utilsRRtutilsRRRRRRRRRR tcompatR!R"R#R$R%R&R'R(R)R*R+Rt status_codesR,tmovedtfoundtotherttemporary_redirectRRtDEFAULT_REDIRECT_LIMITRRRR-RcRpRxR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytsB    "4FF  nF;PK.e[7$Ypprequests/sessions.pynu[# -*- coding: utf-8 -*- """ requests.session ~~~~~~~~~~~~~~~~ This module provides a Session object to manage and persist settings across requests (cookies, auth, proxies). 
"""

import os
import platform
import time
from collections import Mapping
from datetime import timedelta

from .auth import _basic_auth_str
from .compat import cookielib, is_py3, OrderedDict, urljoin, urlparse
from .cookies import (
    cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from ._internal_utils import to_native_string
from .utils import to_key_val_list, default_headers
from .exceptions import (
    TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)

from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter

from .utils import (
    requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
    get_auth_from_url, rewind_body, DEFAULT_PORTS
)

from .status_codes import codes

# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI

# Preferred clock, based on which one is more accurate on a given system.
# On Windows, time.time() has coarse resolution, so a higher-resolution
# counter is picked when available.
if platform.system() == 'Windows':
    try:  # Python 3.3+
        preferred_clock = time.perf_counter
    except AttributeError:  # Earlier than Python 3.
        preferred_clock = time.clock
else:
    preferred_clock = time.time


def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
    """Determines appropriate setting for a given request, taking into account
    the explicit setting on that request, and the setting in the session. If a
    setting is a dictionary, they will be merged together using `dict_class`

    :param request_setting: per-request value; wins over the session value
        on conflicting keys.
    :param session_setting: session-wide default value.
    :param dict_class: mapping type used to build the merged result.
    """

    # A missing side means the other side is used as-is.
    if session_setting is None:
        return request_setting

    if request_setting is None:
        return session_setting

    # Bypass if not a dictionary (e.g. verify)
    if not (
        isinstance(session_setting, Mapping) and
        isinstance(request_setting, Mapping)
    ):
        return request_setting

    # Session values first, then request values layered on top so the
    # request-level setting takes precedence.
    merged_setting = dict_class(to_key_val_list(session_setting))
    merged_setting.update(to_key_val_list(request_setting))

    # Remove keys that are set to None. Extract keys first to avoid altering
    # the dictionary during iteration.
    none_keys = [k for (k, v) in merged_setting.items() if v is None]
    for key in none_keys:
        del merged_setting[key]

    return merged_setting


def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
    """Properly merges both requests and session hooks.

    This is necessary because when request_hooks == {'response': []}, the
    merge breaks Session hooks entirely.
    """
    # An empty 'response' hook list is treated the same as no hooks at all,
    # so the other side's hooks are returned untouched.
    if session_hooks is None or session_hooks.get('response') == []:
        return request_hooks

    if request_hooks is None or request_hooks.get('response') == []:
        return session_hooks

    return merge_setting(request_hooks, session_hooks, dict_class)


class SessionRedirectMixin(object):
    # Redirect-handling behaviour shared by Session; operates on Response /
    # PreparedRequest pairs and never touches adapter state directly.

    def get_redirect_target(self, resp):
        """Receives a Response. Returns a redirect URI or ``None``"""
        # Due to the nature of how requests processes redirects this method will
        # be called at least once upon the original response and at least twice
        # on each subsequent redirect response (if any).
        # If a custom mixin is used to handle this logic, it may be advantageous
        # to cache the redirect location onto the response object as a private
        # attribute.
        if resp.is_redirect:
            location = resp.headers['location']
            # Currently the underlying http module on py3 decode headers
            # in latin1, but empirical evidence suggests that latin1 is very
            # rarely used with non-ASCII characters in HTTP headers.
            # It is more likely to get UTF8 header rather than latin1.
            # This causes incorrect handling of UTF8 encoded location headers.
            # To solve this, we re-encode the location in latin1.
            if is_py3:
                location = location.encode('latin1')
            return to_native_string(location, 'utf8')
        return None

    def should_strip_auth(self, old_url, new_url):
        """Decide whether Authorization header should be removed when redirecting

        :rtype: bool
        """
        old_parsed = urlparse(old_url)
        new_parsed = urlparse(new_url)
        # Any change of host means credentials must not leak to the new host.
        if old_parsed.hostname != new_parsed.hostname:
            return True
        # Special case: allow http -> https redirect when using the standard
        # ports. This isn't specified by RFC 7235, but is kept to avoid
        # breaking backwards compatibility with older versions of requests
        # that allowed any redirects on the same host.
        if (old_parsed.scheme == 'http' and old_parsed.port in (80, None)
                and new_parsed.scheme == 'https' and new_parsed.port in (443, None)):
            return False

        # Handle default port usage corresponding to scheme.
        changed_port = old_parsed.port != new_parsed.port
        changed_scheme = old_parsed.scheme != new_parsed.scheme
        default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
        if (not changed_scheme and old_parsed.port in default_port
                and new_parsed.port in default_port):
            # e.g. http://host:80 -> http://host counts as the same origin.
            return False

        # Standard case: root URI must match
        return changed_port or changed_scheme

    def resolve_redirects(self, resp, req, stream=False, timeout=None,
                          verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
        """Receives a Response. Returns a generator of Responses or Requests."""
        # Generator: yields each followed Response, or (when ``yield_requests``
        # is true) each PreparedRequest instead of sending it.

        hist = []  # keep track of history

        url = self.get_redirect_target(resp)
        while url:
            prepared_request = req.copy()

            # Update history and keep track of redirects.
            # resp.history must ignore the original request in this loop
            hist.append(resp)
            resp.history = hist[1:]

            try:
                resp.content  # Consume socket so it can be released
            except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
                resp.raw.read(decode_content=False)

            if len(resp.history) >= self.max_redirects:
                raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)

            # Release the connection back into the pool.
            resp.close()

            # Handle redirection without scheme (see: RFC 1808 Section 4)
            if url.startswith('//'):
                parsed_rurl = urlparse(resp.url)
                url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url)

            # The scheme should be lower case...
            parsed = urlparse(url)
            url = parsed.geturl()

            # Facilitate relative 'location' headers, as allowed by RFC 7231.
            # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
            # Compliant with RFC3986, we percent encode the url.
            if not parsed.netloc:
                url = urljoin(resp.url, requote_uri(url))
            else:
                url = requote_uri(url)

            prepared_request.url = to_native_string(url)

            self.rebuild_method(prepared_request, resp)

            # https://github.com/requests/requests/issues/1084
            if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
                # https://github.com/requests/requests/issues/3490
                purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
                for header in purged_headers:
                    prepared_request.headers.pop(header, None)
                prepared_request.body = None

            headers = prepared_request.headers
            # Drop any stale Cookie header; cookies are re-prepared from the
            # jar below.
            try:
                del headers['Cookie']
            except KeyError:
                pass

            # Extract any cookies sent on the response to the cookiejar
            # in the new request. Because we've mutated our copied prepared
            # request, use the old one that we haven't yet touched.
            extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
            merge_cookies(prepared_request._cookies, self.cookies)
            prepared_request.prepare_cookies(prepared_request._cookies)

            # Rebuild auth and proxy information.
            proxies = self.rebuild_proxies(prepared_request, proxies)
            self.rebuild_auth(prepared_request, resp)

            # A failed tell() sets `_body_position` to `object()`. This non-None
            # value ensures `rewindable` will be True, allowing us to raise an
            # UnrewindableBodyError, instead of hanging the connection.
            rewindable = (
                prepared_request._body_position is not None and
                ('Content-Length' in headers or 'Transfer-Encoding' in headers)
            )

            # Attempt to rewind consumed file-like object.
            if rewindable:
                rewind_body(prepared_request)

            # Override the original request.
            req = prepared_request

            if yield_requests:
                yield req
            else:

                resp = self.send(
                    req,
                    stream=stream,
                    timeout=timeout,
                    verify=verify,
                    cert=cert,
                    proxies=proxies,
                    allow_redirects=False,
                    **adapter_kwargs
                )

                extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)

                # extract redirect url, if any, for the next loop
                url = self.get_redirect_target(resp)

                yield resp

    def rebuild_auth(self, prepared_request, response):
        """When being redirected we may want to strip authentication from the
        request to avoid leaking credentials. This method intelligently removes
        and reapplies authentication where possible to avoid credential loss.
        """
        headers = prepared_request.headers
        url = prepared_request.url

        if 'Authorization' in headers and self.should_strip_auth(response.request.url, url):
            # If we get redirected to a new host, we should strip out any
            # authentication headers.
            del headers['Authorization']

        # .netrc might have more auth for us on our new host.
        new_auth = get_netrc_auth(url) if self.trust_env else None
        if new_auth is not None:
            prepared_request.prepare_auth(new_auth)

        return

    def rebuild_proxies(self, prepared_request, proxies):
        """This method re-evaluates the proxy configuration by considering the
        environment variables. If we are redirected to a URL covered by
        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
        proxy keys for this URL (in case they were stripped by a previous
        redirect).

        This method also replaces the Proxy-Authorization header where
        necessary.

        :rtype: dict
        """
        proxies = proxies if proxies is not None else {}
        headers = prepared_request.headers
        url = prepared_request.url
        scheme = urlparse(url).scheme
        # Work on a copy so the caller's mapping is never mutated.
        new_proxies = proxies.copy()
        no_proxy = proxies.get('no_proxy')

        bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy)
        if self.trust_env and not bypass_proxy:
            environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)

            # Scheme-specific environment proxy wins over the 'all' fallback.
            proxy = environ_proxies.get(scheme, environ_proxies.get('all'))

            if proxy:
                new_proxies.setdefault(scheme, proxy)

        if 'Proxy-Authorization' in headers:
            del headers['Proxy-Authorization']

        try:
            username, password = get_auth_from_url(new_proxies[scheme])
        except KeyError:
            username, password = None, None

        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username, password)

        return new_proxies

    def rebuild_method(self, prepared_request, response):
        """When being redirected we may want to change the method of the request
        based on certain specs or browser behavior.
        """
        method = prepared_request.method

        # http://tools.ietf.org/html/rfc7231#section-6.4.4
        if response.status_code == codes.see_other and method != 'HEAD':
            method = 'GET'

        # Do what the browsers do, despite standards...
        # First, turn 302s into GETs.
        if response.status_code == codes.found and method != 'HEAD':
            method = 'GET'

        # Second, if a POST is responded to with a 301, turn it into a GET.
        # This bizarre behaviour is explained in Issue 1704.
        if response.status_code == codes.moved and method == 'POST':
            method = 'GET'

        prepared_request.method = method


class Session(SessionRedirectMixin):
    """A Requests session.

    Provides cookie persistence, connection-pooling, and configuration.

    Basic Usage::

      >>> import requests
      >>> s = requests.Session()
      >>> s.get('http://httpbin.org/get')
      <Response [200]>

    Or as a context manager::

      >>> with requests.Session() as s:
      >>>     s.get('http://httpbin.org/get')
      <Response [200]>
    """

    # Attributes serialized by __getstate__/__setstate__ (defined outside this
    # chunk) and copied by Session pickling helpers.
    __attrs__ = [
        'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
        'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
        'max_redirects',
    ]

    def __init__(self):

        #: A case-insensitive dictionary of headers to be sent on each
        #: :class:`Request <Request>` sent from this
        #: :class:`Session <Session>`.
        self.headers = default_headers()

        #: Default Authentication tuple or object to attach to
        #: :class:`Request <Request>`.
        self.auth = None

        #: Dictionary mapping protocol or protocol and host to the URL of the proxy
        #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
        #: be used on each :class:`Request <Request>`.
        self.proxies = {}

        #: Event-handling hooks.
        self.hooks = default_hooks()

        #: Dictionary of querystring data to attach to each
        #: :class:`Request <Request>`. The dictionary values may be lists for
        #: representing multivalued query parameters.
        self.params = {}

        #: Stream response content default.
        self.stream = False

        #: SSL Verification default.
        self.verify = True

        #: SSL client certificate default, if String, path to ssl client
        #: cert file (.pem). If Tuple, ('cert', 'key') pair.
        self.cert = None

        #: Maximum number of redirects allowed. If the request exceeds this
        #: limit, a :class:`TooManyRedirects` exception is raised.
        #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
        #: 30.
        self.max_redirects = DEFAULT_REDIRECT_LIMIT

        #: Trust environment settings for proxy configuration, default
        #: authentication and similar.
        self.trust_env = True

        #: A CookieJar containing all currently outstanding cookies set on this
        #: session. By default it is a
        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
        #: may be any other ``cookielib.CookieJar`` compatible object.
        self.cookies = cookiejar_from_dict({})

        # Default connection adapters.
        # Longer prefixes are matched first by get_adapter, so the https
        # mount registered here takes priority over plain http for https URLs.
        self.adapters = OrderedDict()
        self.mount('https://', HTTPAdapter())
        self.mount('http://', HTTPAdapter())

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def prepare_request(self, request):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for
        transmission and returns it. The :class:`PreparedRequest` has settings
        merged from the :class:`Request <Request>` instance and those of the
        :class:`Session`.

        :param request: :class:`Request` instance to prepare with this
            session's settings.
        :rtype: requests.PreparedRequest
        """
        cookies = request.cookies or {}

        # Bootstrap CookieJar.
        if not isinstance(cookies, cookielib.CookieJar):
            cookies = cookiejar_from_dict(cookies)

        # Merge with session cookies
        merged_cookies = merge_cookies(
            merge_cookies(RequestsCookieJar(), self.cookies), cookies)

        # Set environment's basic authentication if not explicitly set.
        auth = request.auth
        if self.trust_env and not auth and not self.auth:
            auth = get_netrc_auth(request.url)

        p = PreparedRequest()
        p.prepare(
            method=request.method.upper(),
            url=request.url,
            files=request.files,
            data=request.data,
            json=request.json,
            headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
            params=merge_setting(request.params, self.params),
            auth=merge_setting(auth, self.auth),
            cookies=merged_cookies,
            hooks=merge_hooks(request.hooks, self.hooks),
        )
        return p

    def request(self, method, url,
            params=None, data=None, headers=None, cookies=None, files=None,
            auth=None, timeout=None, allow_redirects=True, proxies=None,
            hooks=None, stream=None, verify=None, cert=None, json=None):
        """Constructs a :class:`Request <Request>`, prepares it and sends it.
        Returns :class:`Response <Response>` object.

        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param params: (optional) Dictionary or bytes to be sent in the query
            string for the :class:`Request`.
        :param data: (optional) Dictionary, bytes, or file-like object to send
            in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the
            :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the
            :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the
            :class:`Request`.
        :param files: (optional) Dictionary of ``'filename': file-like-objects``
            for multipart encoding upload.
        :param auth: (optional) Auth tuple or callable to enable
            Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param allow_redirects: (optional) Set to True by default.
        :type allow_redirects: bool
        :param proxies: (optional) Dictionary mapping protocol or protocol and
            hostname to the URL of the proxy.
        :param stream: (optional) whether to immediately download the response
            content. Defaults to ``False``.
        :param verify: (optional) Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use. Defaults to ``True``.
        :param cert: (optional) if String, path to ssl client cert file (.pem).
            If Tuple, ('cert', 'key') pair.
        :rtype: requests.Response
        """
        # Create the Request.
        req = Request(
            method=method.upper(),
            url=url,
            headers=headers,
            files=files,
            data=data or {},
            json=json,
            params=params or {},
            auth=auth,
            cookies=cookies,
            hooks=hooks,
        )
        prep = self.prepare_request(req)

        proxies = proxies or {}

        # Layer environment-derived settings (proxies, verify, cert) under the
        # explicit per-call arguments.
        settings = self.merge_environment_settings(
            prep.url, proxies, stream, verify, cert
        )

        # Send the request.
        send_kwargs = {
            'timeout': timeout,
            'allow_redirects': allow_redirects,
        }
        send_kwargs.update(settings)
        resp = self.send(prep, **send_kwargs)

        return resp

    def get(self, url, **kwargs):
        r"""Sends a GET request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        kwargs.setdefault('allow_redirects', True)
        return self.request('GET', url, **kwargs)

    def options(self, url, **kwargs):
        r"""Sends a OPTIONS request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        kwargs.setdefault('allow_redirects', True)
        return self.request('OPTIONS', url, **kwargs)

    def head(self, url, **kwargs):
        r"""Sends a HEAD request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        # HEAD does not follow redirects by default, matching browser behaviour.
        kwargs.setdefault('allow_redirects', False)
        return self.request('HEAD', url, **kwargs)

    def post(self, url, data=None, json=None, **kwargs):
        r"""Sends a POST request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send
            in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the
            :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        return self.request('POST', url, data=data, json=json, **kwargs)

    def put(self, url, data=None, **kwargs):
        r"""Sends a PUT request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send
            in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        return self.request('PUT', url, data=data, **kwargs)

    def patch(self, url, data=None, **kwargs):
        r"""Sends a PATCH request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send
            in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        return self.request('PATCH', url, data=data, **kwargs)

    def delete(self, url, **kwargs):
        r"""Sends a DELETE request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        return self.request('DELETE', url, **kwargs)

    def send(self, request, **kwargs):
        """Send a given PreparedRequest.

        :rtype: requests.Response
        """
        # Set defaults that the hooks can utilize to ensure they always have
        # the correct parameters to reproduce the previous request.
        kwargs.setdefault('stream', self.stream)
        kwargs.setdefault('verify', self.verify)
        kwargs.setdefault('cert', self.cert)
        kwargs.setdefault('proxies', self.proxies)

        # It's possible that users might accidentally send a Request object.
        # Guard against that specific failure case.
        if isinstance(request, Request):
            raise ValueError('You can only send PreparedRequests.')

        # Set up variables needed for resolve_redirects and dispatching of hooks
        allow_redirects = kwargs.pop('allow_redirects', True)
        stream = kwargs.get('stream')
        hooks = request.hooks

        # Get the appropriate adapter to use
        adapter = self.get_adapter(url=request.url)

        # Start time (approximately) of the request
        start = preferred_clock()

        # Send the request
        r = adapter.send(request, **kwargs)

        # Total elapsed time of the request (approximately)
        elapsed = preferred_clock() - start
        r.elapsed = timedelta(seconds=elapsed)

        # Response manipulation hooks
        r = dispatch_hook('response', hooks, r, **kwargs)

        # Persist cookies
        if r.history:

            # If the hooks create history then we want those cookies too
            for resp in r.history:
                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)

        extract_cookies_to_jar(self.cookies, request, r.raw)

        # Redirect resolving generator.
        gen = self.resolve_redirects(r, request, **kwargs)

        # Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else [] # Shuffle things around if there's history. if history: # Insert the first (original) request at the start history.insert(0, r) # Get the last request made r = history.pop() r.history = history # If redirects aren't being followed, store the response on the Request for Response.next(). if not allow_redirects: try: r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs)) except StopIteration: pass if not stream: r.content return r def merge_environment_settings(self, url, proxies, stream, verify, cert): """ Check the environment and merge it with some settings. :rtype: dict """ # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. no_proxy = proxies.get('no_proxy') if proxies is not None else None env_proxies = get_environ_proxies(url, no_proxy=no_proxy) for (k, v) in env_proxies.items(): proxies.setdefault(k, v) # Look for requests environment configuration and be compatible # with cURL. if verify is True or verify is None: verify = (os.environ.get('REQUESTS_CA_BUNDLE') or os.environ.get('CURL_CA_BUNDLE')) # Merge all the kwargs. proxies = merge_setting(proxies, self.proxies) stream = merge_setting(stream, self.stream) verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) return {'verify': verify, 'proxies': proxies, 'stream': stream, 'cert': cert} def get_adapter(self, url): """ Returns the appropriate connection adapter for the given URL. :rtype: requests.adapters.BaseAdapter """ for (prefix, adapter) in self.adapters.items(): if url.lower().startswith(prefix): return adapter # Nothing matches :-/ raise InvalidSchema("No connection adapters were found for '%s'" % url) def close(self): """Closes all adapters and as such the session""" for v in self.adapters.values(): v.close() def mount(self, prefix, adapter): """Registers a connection adapter to a prefix. 
Adapters are sorted in descending order by prefix length. """ self.adapters[prefix] = adapter keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] for key in keys_to_move: self.adapters[key] = self.adapters.pop(key) def __getstate__(self): state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__) return state def __setstate__(self, state): for attr, value in state.items(): setattr(self, attr, value) def session(): """ Returns a :class:`Session` for context-management. :rtype: Session """ return Session() PK.e[;̵BBrequests/packages.pycnu[ abc@sddlZxdD]ZdeZeeees   PK.e[@requests/api.pyonu[ abc@sqdZddlmZdZd dZdZdZd d dZd dZ d d Z d Z d S( s requests.api ~~~~~~~~~~~~ This module implements the Requests API. :copyright: (c) 2012 by Kenneth Reitz. :license: Apache2, see LICENSE for more details. i(tsessionsc Ks2tj }|jd|d||SWdQXdS(s Constructs and sends a :class:`Request `. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary or list of tuples ``[(key, value)]`` (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. 
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers to add for the file. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How many seconds to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. :param stream: (optional) if ``False``, the response content will be immediately downloaded. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. :return: :class:`Response ` object :rtype: requests.Response Usage:: >>> import requests >>> req = requests.request('GET', 'http://httpbin.org/get') tmethodturlN(RtSessiontrequest(RRtkwargstsession((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyRs)cKs&|jdttd|d||S(sOSends a GET request. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. 
:return: :class:`Response ` object :rtype: requests.Response tallow_redirectstgettparams(t setdefaulttTrueR(RR R((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyR=s cKs |jdttd||S(sSends an OPTIONS request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response Rtoptions(R R R(RR((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyR Ks cKs |jdttd||S(sSends a HEAD request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response Rthead(R tFalseR(RR((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyR Xs cKstd|d|d||S(sSends a POST request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response tposttdatatjson(R(RRRR((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyRes cKstd|d||S(sSends a PUT request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response tputR(R(RRR((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyRss cKstd|d||S(sSends a PATCH request. :param url: URL for the new :class:`Request` object. 
:param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response tpatchR(R(RRR((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyRs cKstd||S(sSends a DELETE request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response tdelete(R(RR((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyRs N( t__doc__tRRtNoneRR R RRRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyt s -    PK.e[>l55requests/compat.pyonu[ abc@s5dZddlmZddlZejZeddkZeddkZddlZerGddl m Z m Z m Z m Z mZmZmZmZmZddlmZmZmZmZmZdd lmZddlZdd lmZdd lmZdd lmZe Z!e Z"e#Z e$Z$e%e&e'fZ(e%e&fZ)ner1dd l*mZmZmZmZmZm Z m Z m Z m Z mZddl+mZmZmZmZmZddl,m-Zdd l.mZdd l/mZdd l0mZe Z!e Z e"Z"e e"fZ$e%e'fZ(e%fZ)ndS(sq requests.compat ~~~~~~~~~~~~~~~ This module handles import compatibility issues between Python 2 and Python 3. 
i(tchardetNiii( tquotetunquotet quote_plust unquote_plust urlencodet getproxiest proxy_bypasstproxy_bypass_environmenttgetproxies_environment(turlparset urlunparseturljointurlsplitt urldefrag(tparse_http_list(tMorsel(tStringIO(t OrderedDict( R R R R RRRRRR(RRRRR (t cookiejar(1t__doc__t pip._vendorRtsyst version_infot_vertis_py2tis_py3tjsonturllibRRRRRRRRR R R R R Rturllib2Rt cookielibtCookieRRt)pip._vendor.urllib3.packages.ordered_dictRtstrt builtin_strtbytestunicodet basestringtinttlongtfloatt numeric_typest integer_typest urllib.parseturllib.requestthttpRt http.cookiestiot collections(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/compat.pyt sB   @( F(  PK.e[wL&d&drequests/utils.pycnu[ abc@s\dZddlZddlZddlZddlZddlZddlZddlZddlZddl Z ddl Z ddl Z ddl m Z ddl mZddlmZddlmZddlmZmZmZmZmZmZmZmZmZmZmZmZm Z m!Z!dd l"m#Z#dd l$m%Z%dd l&m'Z'm(Z(m)Z)m*Z*d@Z+ej,Z-idd6dd6Z.ej/dkrdZ0dZndZ1dZ2e3dZ4dZ5dZ6dZ7dZ8dZ9e3dZ:dZ;dZ<d Z=d!Z>d"Z?d#Z@d$ZAeBd%d&ZCd'ZDd(ZEd)ZFd*ZGd+ZHd,ZIejJd-ZKd.ZLdd/ZNd0ZOd1d2ZPd3ZQd4ZRd5jSd6ZTeTd7ZUeTd8ZVd9ZWd:ZXd;ZYejZd<Z[ejZd<Z\d=Z]d>Z^d?Z_dS(As requests.utils ~~~~~~~~~~~~~~ This module provides utility functions that are used within Requests that are also useful for external consumption. 
iNi(t __version__(tcerts(tto_native_string(tparse_http_list(tquoteturlparsetbyteststrt OrderedDicttunquotet getproxiest proxy_bypasst urlunparset basestringt integer_typestis_py3tproxy_bypass_environmenttgetproxies_environment(tcookiejar_from_dict(tCaseInsensitiveDict(t InvalidURLt InvalidHeadertFileModeWarningtUnrewindableBodyErrors.netrct_netrciPthttpithttpstWindowscCs"trddl}n ddl}yE|j|jd}|j|dd}|j|dd}Wntk rztSX| s| rtS|jd}x|D]w}|dkrd|krt Sn|j dd }|j d d }|j d d}t j ||t j rt SqWtS( Nis;Software\Microsoft\Windows\CurrentVersion\Internet Settingst ProxyEnableit ProxyOverridet;st.s\.t*s.*t?(Rtwinregt_winregtOpenKeytHKEY_CURRENT_USERt QueryValueExtOSErrortFalsetsplittTruetreplacetretmatchtI(thostR"tinternetSettingst proxyEnablet proxyOverridettest((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytproxy_bypass_registry.s2          cCs!trt|St|SdS(sReturn True, if the host should be bypassed. Checks proxy settings gathered from the environment, if specified, or the registry. N(RRR4(R/((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyR Os  cCs"t|dr|j}n|S(s/Returns an internal sequence dictionary update.titems(thasattrR5(td((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytdict_to_sequence[scCsd}d}t|dr*t|}nt|drE|j}nmt|dry|j}Wntjk rzqXtj|j}d|j krt j dt qnt|drty|j }Wn,ttfk r|dk rq|}qqqtXt|drt|dkrty3|jdd |j }|j|pIdWqqttfk rmd}qqXqtn|dkrd}ntd||S( Nit__len__tlentfilenotbs%Requests has determined the content-length for this request using the binary size of the file: however, the file has been opened in text mode (i.e. without the 'b' flag in the mode). This may lead to an incorrect content-length. 
In Requests 3.0, support will be removed for files in text mode.ttelltseeki(tNoneR6R:R;tiotUnsupportedOperationtostfstattst_sizetmodetwarningstwarnRR=R'tIOErrorR>tmax(tot total_lengthtcurrent_positionR;((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt super_lends@       c CseyGddlm}m}d}x^tD]V}ytjjdj|}Wntk r_dSXtjj |r&|}Pq&q&W|dkrdSt |}d}t |t r|j d}n|jj|d} yG||j| } | r| drdnd} | | | d fSWn#|tfk rE|rFqFnXWnttfk r`nXdS( s;Returns the Requests tuple auth for a given url from netrc.i(tnetrctNetrcParseErrors~/{0}Nt:tasciiiii(RNROR?t NETRC_FILESRBtpatht expandusertformattKeyErrortexistsRt isinstanceRtdecodetnetlocR)tauthenticatorsRHt ImportErrortAttributeError( turlt raise_errorsRNROt netrc_pathtftloctritsplitstrR/Rtlogin_i((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytget_netrc_auths8    cCs[t|dd}|rWt|trW|ddkrW|ddkrWtjj|SdS(s0Tries to guess the filename of the given object.tnameitN(tgetattrR?RXR RBRStbasename(tobjRg((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytguess_filenames%cCsD|dkrdSt|ttttfr:tdnt|S(sTake an object and test to see if it can be represented as a dictionary. Unless it can not be represented as such, return an OrderedDict, e.g., :: >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') ValueError: need more than 1 value to unpack >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) :rtype: OrderedDict s+cannot encode objects that are not 2-tuplesN(R?RXRRtbooltintt ValueErrorR(tvalue((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytfrom_key_val_lists  cCse|dkrdSt|ttttfr:tdnt|tjr[|j }nt |S(sTake an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_list([('key', 'val')]) [('key', 'val')] >>> to_key_val_list({'key': 'val'}) [('key', 'val')] >>> to_key_val_list('string') ValueError: cannot encode objects that are not 2-tuples. 
:rtype: list s+cannot encode objects that are not 2-tuplesN( R?RXRRRnRoRpt collectionstMappingR5tlist(Rq((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytto_key_val_lists cCshg}x[t|D]M}|d |dko8dknrSt|dd!}n|j|qW|S(sParse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. It basically works like :func:`parse_set_header` just that items may appear multiple times and case sensitivity is preserved. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] To create a header from the :class:`list` again, use the :func:`dump_header` function. :param value: a string with a list header. :return: :class:`list` :rtype: list iit"(t_parse_list_headertunquote_header_valuetappend(Rqtresulttitem((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytparse_list_headers $cCsi}xt|D]~}d|kr5d||>> d = parse_dict_header('foo="is a fish", bar="as well"') >>> type(d) is dict True >>> sorted(d.items()) [('bar', 'as well'), ('foo', 'is a fish')] If there is no value for a key it will be `None`: >>> parse_dict_header('key_without_value') {'key_without_value': None} To create a header from the :class:`dict` again, use the :func:`dump_header` function. :param value: a string with a dict header. :return: :class:`dict` :rtype: dict t=iiRwN(RxR?R)Ry(RqR{R|Rg((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytparse_dict_header1s  $cCsq|rm|d|dko%dknrm|dd!}| sN|d dkrm|jddjddSn|S( sUnquotes a header value. (Reversal of :func:`quote_header_value`). This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. 
:rtype: str iiRwiis\\s\s\"(R+(Rqt is_filename((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyRyTs * cCs+i}x|D]}|j||j/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytdict_from_cookiejarms cCs t||S(sReturns a CookieJar from a key/value dictionary. :param cj: CookieJar to insert cookies into. :param cookie_dict: Dict of key/values to insert into CookieJar. :rtype: CookieJar (R(RR((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytadd_dict_to_cookiejar|scCsvtjdttjddtj}tjddtj}tjd}|j||j||j|S(slReturns encodings from given content string. :param content: bytestring to extract encodings from. sIn requests 3.0, get_encodings_from_content will be removed. For more information, please see the discussion on issue #2266. (This warning should only appear once.)s!]tflagss+]s$^<\?xml.*?encoding=["\']*(.+?)["\'>](RFRGtDeprecationWarningR,tcompileR.tfindall(tcontentt charset_ret pragma_retxml_re((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytget_encodings_from_contentscCs_|jd}|sdStj|\}}d|krK|djdSd|kr[dSdS(s}Returns encodings from given HTTP Header Dict. :param headers: dictionary to extract encoding from. :rtype: str s content-typetcharsets'"ttexts ISO-8859-1N(tgetR?tcgit parse_headertstrip(theaderst content_typetparams((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytget_encoding_from_headerss  ccs|jdkr)x|D] }|VqWdStj|jdd}x+|D]#}|j|}|rK|VqKqKW|jddt}|r|VndS(sStream decodes a iterator.NterrorsR+ttfinal(tencodingR?tcodecstgetincrementaldecoderRYR*(titeratortrR|tdecodertchunktrv((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytstream_decode_response_unicodes    ccsdd}|dks|dkr-t|}nx0|t|kr_||||!V||7}q0WdS(s Iterate over slices of a string.iN(R?R:(tstringt slice_lengthtpos((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt iter_slicess cCstjdtg}t|j}|rcyt|j|SWqctk r_|j|qcXnyt|j|ddSWnt k r|jSXdS(sReturns the requested content back in unicode. 
:param r: Response object to get unicode content from. Tried: 1. charset from content-type 2. fall back and replace all unicode characters :rtype: str sIn requests 3.0, get_unicode_from_response will be removed. For more information, please see the discussion on issue #2266. (This warning should only appear once.)RR+N( RFRGRRRRRt UnicodeErrorRzt TypeError(Rttried_encodingsR((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytget_unicode_from_responses   t4ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzs0123456789-._~cCs|jd}xtdt|D]}||dd!}t|dkr|jrytt|d}Wn!tk rtd|nX|tkr|||d||/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytunquote_unreserveds  cCsKd}d}ytt|d|SWntk rFt|d|SXdS(sRe-quote the given URI. This function passes the given URI through an unquote/quote cycle to ensure that it is fully and consistently quoted. :rtype: str s!#$%&'()*+,/:;=?@[]~s!#$&'()*+,/:;=?@[]~tsafeN(RRR(Rtsafe_with_percenttsafe_without_percent((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt requote_uri s  cCstjdtj|d}|jd\}}tjdtjtt|d}tjdtj|d|@}||@||@kS(sThis function allows you to check if an IP belongs to a network subnet Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 :rtype: bool s=Lit/(tstructtunpacktsockett inet_atonR)tdotted_netmaskRo(tiptnettipaddrtnetaddrtbitstnetmasktnetwork((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytaddress_in_network#s +#cCs/ddd|>dA}tjtjd|S(sConverts mask from /xx format to xxx.xxx.xxx.xxx Example: if mask is 24 function returns 255.255.255.0 :rtype: str Iii s>I(Rt inet_ntoaRtpack(tmaskR((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyR2scCs-ytj|Wntjk r(tSXtS(s :rtype: bool (RRterrorR(R*(t string_ip((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytis_ipv4_address=s cCs|jddkryt|jdd}Wntk rFtSX|dks_|dkrctSytj|jddWqtjk rtSXntStS(sV Very simple check of the cidr format in no_proxy 
variable. :rtype: bool Rii i( tcountRoR)RpR(RRRR*(tstring_networkR((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt is_valid_cidrHs ccst|dk }|r4tjj|}|tj|/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt set_environ`s    c Cscd}|}|d kr*|d}nt|j}|r d|jddjdD}|jdd}t|rx|D]8}t|rt||rtSq||krtSqWq x@|D]5}|j |s|jddj |rtSqWnt d|8yt |}Wn t t jfk rNt}nXWd QX|r_tStS( sL Returns whether we should bypass proxies or not. :rtype: bool cSs(tjj|p'tjj|jS(N(RBRRtupper(tk((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt|Rtno_proxycss|]}|r|VqdS(N((t.0R/((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pys st Rt,RPiN(R?RRZR+R)RRRR*tendswithRR RRtgaierrorR(( R^Rt get_proxyt no_proxy_argRZRtproxy_ipR/tbypass((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytshould_bypass_proxiesvs4  %      + cCs!t|d|riStSdS(sA Return a dict of environment proxies. :rtype: dict RN(RR (R^R((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytget_environ_proxiesscCs|p i}t|}|jdkrC|j|j|jdS|jd|j|jd|jdg}d}x(|D] }||krz||}PqzqzW|S(sSelect a proxy for the url, if applicable. :param url: The url being for the request :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs talls://sall://N(RthostnameR?Rtscheme(R^tproxiesturlpartst proxy_keystproxyt proxy_key((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt select_proxys       spython-requestscCsd|tfS(sO Return a string representing the default user agent. 
:rtype: str s%s/%s(R(Rg((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytdefault_user_agentscCs2titd6djd d6dd6dd 6S( s9 :rtype: requests.structures.CaseInsensitiveDict s User-Agents, tgziptdeflatesAccept-Encodings*/*tAccepts keep-alivet Connection(RR(RRR(((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytdefault_headerss  c Csg}d}xtjd|D]}y|jdd\}}Wntk ra|d}}nXi|jdd6}xa|jdD]P}y|jd\}}Wntk rPnX|j|||j|; rel=front; type="image/jpeg",; rel=back;type="image/jpeg" :rtype: list s '"s, * '"R^R~(R,R)RpRRz( Rqtlinkst replace_charstvalR^Rtlinktparamtkey((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytparse_header_linkss    sRQiicCs|d }|tjtjfkr&dS|d tjkr=dS|d tjtjfkr]dS|jt}|dkr|dS|dkr|d d dtkrd S|d d dtkrd Sn|dkr|d t krd S|d t krdSnd S(s :rtype: str isutf-32is utf-8-sigisutf-16isutf-8Ns utf-16-beis utf-16-les utf-32-bes utf-32-le( Rt BOM_UTF32_LEt BOM_UTF32_BEtBOM_UTF8t BOM_UTF16_LEt BOM_UTF16_BERt_nullt_null2t_null3R?(tdatatsamplet nullcount((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytguess_json_utfs*    cCsSt||\}}}}}}|s7||}}nt||||||fS(sGiven a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument. :rtype: str (RR (R^t new_schemeRRZRSRtquerytfragment((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytprepend_scheme_if_needed1s!cCsRt|}y"t|jt|jf}Wnttfk rMd}nX|S(s{Given a url with authentication components, extract them into a tuple of username,password. :rtype: (str,str) R(RR(RR tusernametpasswordR]R(R^tparsedtauth((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytget_auth_from_urlBs  " s^\S[^\r\n]*$|^$cCs|\}}t|tr$t}nt}y&|j|sOtd|nWn0tk rtd||t|fnXdS(sVerifies that header value is a string which doesn't contain leading whitespace or return characters. This prevents unintended header injection. :param header: tuple, in the format (name, value). 
s7Invalid return character or leading space in header: %ss>Value for header {%s: %s} must be of type str or bytes, not %sN(RXRt_CLEAN_HEADER_REGEX_BYTEt_CLEAN_HEADER_REGEX_STRR-RRttype(theaderRgRqtpat((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pytcheck_header_validityWs   cCsft|\}}}}}}|s4||}}n|jddd}t|||||dfS(sW Given a url remove the fragment and the authentication part. :rtype: str t@iiR(RtrsplitR (R^RRZRSRR R ((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt urldefragauthls cCs}t|jdd}|dk rmt|jtrmy||jWqyttfk ritdqyXn tddS(sfMove file pointer back to its recorded starting position so it can be read again on redirect. R>s;An error occurred when rewinding request body for redirect.s+Unable to rewind request body for redirect.N( RjtbodyR?RXt_body_positionRRHR'R(tprepared_requestt body_seek((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt rewind_body}s(s.netrcR(`t__doc__RRRst contextlibR@RBtplatformR,RRRFRRRt_internal_utilsRtcompatRRxRRRRRR R R R R RRRRtcookiesRt structuresRt exceptionsRRRRRRtwheretDEFAULT_CA_BUNDLE_PATHt DEFAULT_PORTStsystemR4R8RMR(RfRmRrRvR}RRyRRRRRRRt frozensetRRRRRRRtcontextmanagerRRR?RRRRRtencodeRRRR RRRRRRRR!(((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyt s           ^"  ! = 3    #      %      9  "      PK.e[<ށrequests/packages.pynu[import sys # This code exists for backwards compatibility reasons. # I don't like it either. Just look the other way. :) for package in ('urllib3', 'idna', 'chardet'): vendored_package = "pip._vendor." + package locals()[package] = __import__(vendored_package) # This traversal is apparently necessary such that the identities are # preserved (requests.packages.urllib3.* is urllib3.*) for mod in list(sys.modules): if mod == vendored_package or mod.startswith(vendored_package + '.'): unprefixed_mod = mod[len("pip._vendor."):] sys.modules['pip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod] # Kinda cool, though, right? 
PK.e[:<#J#Jrequests/adapters.pyonu[ abc@s5dZddlZddlZddlmZmZddlmZddl m Z ddl m Z ddlmZddlmZdd lmZdd lmZdd lmZdd lmZdd lmZddlmZddlmZddlmZddlmZddlmZm Z ddl!m"Z"m#Z#m$Z$m%Z%m&Z&m'Z'ddl(m)Z)ddl*m+Z+ddl,m-Z-m.Z.m/Z/mZmZm0Z0m1Z1ddl2m3Z3yddl4m5Z5Wne6k rdZ5nXe7Z8dZ9dZ:dZ<de=fdYZ>de>fd YZ?dS(!s requests.adapters ~~~~~~~~~~~~~~~~~ This module contains the transport adapters that Requests uses to define and maintain connections. iN(t PoolManagertproxy_from_url(t HTTPResponse(tTimeout(tRetry(tClosedPoolError(tConnectTimeoutError(t HTTPError(t MaxRetryError(tNewConnectionError(t ProxyError(t ProtocolError(tReadTimeoutError(tSSLError(t ResponseErrori(tResponse(turlparset basestring(tDEFAULT_CA_BUNDLE_PATHtget_encoding_from_headerstprepend_scheme_if_neededtget_auth_from_urlt urldefragautht select_proxy(tCaseInsensitiveDict(textract_cookies_to_jar(tConnectionErrortConnectTimeoutt ReadTimeoutR R t RetryErrort InvalidSchema(t_basic_auth_str(tSOCKSProxyManagercOstddS(Ns'Missing dependencies for SOCKS support.(R(targstkwargs((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR +si it BaseAdaptercBs8eZdZdZededddZdZRS(sThe Base Transport AdaptercCstt|jdS(N(tsuperR#t__init__(tself((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR%7scCs tdS(sCSends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. 
N(tNotImplementedError(R&trequesttstreamttimeouttverifytcerttproxies((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pytsend:scCs tdS(s!Cleans up adapter specific items.N(R'(R&((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pytcloseLsN( t__name__t __module__t__doc__R%tFalsetNonetTrueR.R/(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR#4s   t HTTPAdaptercBseZdZdddddgZeeeedZdZdZ ed Z d Z d Z d Z dd ZdZdZdZdZededddZRS(sThe built-in HTTP Adapter for urllib3. Provides a general-case interface for Requests sessions to contact HTTP and HTTPS urls by implementing the Transport Adapter interface. This class will usually be created by the :class:`Session ` class under the covers. :param pool_connections: The number of urllib3 connection pools to cache. :param pool_maxsize: The maximum number of connections to save in the pool. :param max_retries: The maximum number of retries each connection should attempt. Note, this applies only to failed DNS lookups, socket connections and connection timeouts, never to requests where data has made it to the server. By default, Requests does not retry failed connections. If you need granular control over the conditions under which we retry a request, import urllib3's ``Retry`` class and pass that instead. :param pool_block: Whether the connection pool should block for connections. 
Usage:: >>> import requests >>> s = requests.Session() >>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> s.mount('http://', a) t max_retriestconfigt_pool_connectionst _pool_maxsizet _pool_blockcCs|tkr$tddt|_ntj||_i|_i|_tt|j ||_ ||_ ||_ |j ||d|dS(Nitreadtblock(tDEFAULT_RETRIESRR3R7tfrom_intR8t proxy_managerR$R6R%R9R:R;tinit_poolmanager(R&tpool_connectionst pool_maxsizeR7t pool_block((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR%ns      cstfdjDS(Nc3s'|]}|t|dfVqdS(N(tgetattrR4(t.0tattr(R&(sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pys s(tdictt __attrs__(R&((R&sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyt __getstate__scCsbi|_i|_x*|jD]\}}t|||qW|j|j|jd|jdS(NR=(R@R8titemstsetattrRAR9R:R;(R&tstateRGtvalue((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyt __setstate__s   c KsF||_||_||_td|d|d|dt||_dS(sInitializes a urllib3 PoolManager. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param connections: The number of urllib3 connection pools to cache. :param maxsize: The maximum number of connections to save in the pool. :param block: Block when no free connections are available. :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. t num_poolstmaxsizeR=tstrictN(R9R:R;RR5t poolmanager(R&t connectionsRQR=t pool_kwargs((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyRAs   c Ks||jkr|j|}n|jjdrt|\}}t|d|d|d|jd|jd|j|}|j|`. :param proxy: The proxy to return a urllib3 ProxyManager for. :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. 
:returns: ProxyManager :rtype: urllib3.ProxyManager tsockstusernametpasswordRPRQR=t proxy_headers( R@tlowert startswithRR R9R:R;RYR(R&tproxyt proxy_kwargstmanagerRWRXRY((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pytproxy_manager_fors*     cCs|jjdr|rd }|tk r6|}n|sEt}n| s_tjj| rwtdj |nd|_ tjj |s||_ q||_ nd|_ d |_ d |_ |rt|ts|d|_|d|_n||_d |_|jrCtjj|j rCtdj |jn|jrtjj|j rtdj |jqnd S( sAVerify a SSL certificate. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param conn: The urllib3 connection object associated with the cert. :param url: The requested URL. :param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: The SSL certificate to verify. thttpssFCould not find a suitable TLS CA certificate bundle, invalid path: {0}t CERT_REQUIREDt CERT_NONEiis:Could not find the TLS certificate file, invalid path: {0}s2Could not find the TLS key file, invalid path: {0}N(RZR[R4R5RtostpathtexiststIOErrortformatt cert_reqstisdirtca_certst ca_cert_dirt isinstanceRt cert_filetkey_file(R&tconnturlR+R,tcert_loc((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyt cert_verifys8                cCst}t|dd|_tt|di|_t|j|_||_|jj |_ t |j t r|j j d|_ n |j |_ t|j||||_||_|S(sBuilds a :class:`Response ` object from a urllib3 response. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter ` :param req: The :class:`PreparedRequest ` used to generate the response. :param resp: The urllib3 response object. 
:rtype: requests.Response tstatustheaderssutf-8N(RRER4t status_codeRRtRtencodingtrawtreasonRlRptbytestdecodeRtcookiesR(t connection(R&treqtresptresponse((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pytbuild_responses     cCsst||}|rEt|d}|j|}|j|}n*t|}|j}|jj|}|S(sReturns a urllib3 connection for the given URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param url: The URL to connect to. :param proxies: (optional) A Requests-style dictionary of proxies used on this request. :rtype: urllib3.ConnectionPool thttp(RRR_tconnection_from_urlRtgeturlRS(R&RpR-R\R@Rotparsed((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pytget_connection"s   cCs5|jjx!|jjD]}|jqWdS(sDisposes of any internal state. Currently, this closes the PoolManager and any active ProxyManager, which closes any pooled connections. N(RStclearR@tvalues(R&R\((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR/9s c Cst|j|}t|jj}|o3|dk}t}|rit|jj}|jd}n|j}|r| rt|j}n|S(s?Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param request: The :class:`PreparedRequest ` being sent. :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. :rtype: str R`RV( RRpRtschemeR3RZR[tpath_urlR( R&R(R-R\Rtis_proxied_http_requesttusing_socks_proxyt proxy_schemeRp((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyt request_urlCs  cKsdS(s"Add any headers needed by the connection. As of v2.0 this does nothing by default, but is left for overriding by users that subclass the :class:`HTTPAdapter `. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. 
:param request: The :class:`PreparedRequest ` to add headers to. :param kwargs: The keyword arguments from the call to send(). N((R&R(R"((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyt add_headers`s cCs8i}t|\}}|r4t|||d`. :param proxies: The url of the proxy being used for this request. :rtype: dict sProxy-Authorization(RR(R&R\RtRWRX((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyRYns cCs}|j|j|}|j||j|||j||}|j||jdkphd|jk } t|t ry%|\} } t d| d| }Wqt k r} dj |} t | qXn't|t rnt d|d|}y| s[|j d|jd|d|jd|jd td td td td |jd| }nft|drv|j}n|jdt}y"|j|j|dtx-|jjD]\}}|j||qW|jx^|jD]S}|jtt|djd|jd|j||jdqW|jdy|jdt}Wntk r|j}nXt j!|d|d|d td t}Wn|j"nXWnt#t$j%fk r} t&| d|n{t'k r} t| j(t)r=t| j(t*s=t+| d|q=nt| j(t,rdt-| d|nt| j(t.rt/| d|nt| j(t0rt1| d|nt&| d|nt2k r} t&| d|nt.k r } t/| ndt0t3fk rl} t| t0rBt1| d|qmt| t4rft5| d|qmnX|j6||S(sSends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. :rtype: requests.Response sContent-LengthtconnectR<ssInvalid timeout {0}. 
Pass a (connect, read) timeout tuple, or a single float to set both timeouts to the same valuetmethodRptbodyRttredirecttassert_same_hosttpreload_contenttdecode_contenttretriesR*t proxy_pooltskip_accept_encodingisutf-8s s0 t bufferingtpoolR|R(N(7RRpRrRRRR4RtRlttuplet TimeoutSaucet ValueErrorRgturlopenRR3R7thasattrRt _get_conntDEFAULT_POOL_TIMEOUTt putrequestR5RKt putheadert endheadersR.thextlentencodet getresponset TypeErrorRt from_httplibR/R tsocketterrorRRRxRR RRRt _ProxyErrorR t _SSLErrorR Rt _HTTPErrorR RR(R&R(R)R*R+R,R-RoRptchunkedRR<teterrR~tlow_conntheaderRNtitr((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR.s            &       N(R0R1R2RItDEFAULT_POOLSIZER>tDEFAULT_POOLBLOCKR%RJRORAR_RrRR4RR/RRRYR3R5R.(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR6Qs$      % 4 %    (@R2tos.pathRcRtpip._vendor.urllib3.poolmanagerRRtpip._vendor.urllib3.responseRtpip._vendor.urllib3.utilRRtpip._vendor.urllib3.util.retryRtpip._vendor.urllib3.exceptionsRRRRRR R RR R R RRtmodelsRtcompatRRtutilsRRRRRRt structuresRR{Rt exceptionsRRRRRtauthRt!pip._vendor.urllib3.contrib.socksR t ImportErrorR3RRR>R4RtobjectR#R6(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyt sB  .4  PK.e[;̵BBrequests/packages.pyonu[ abc@sddlZxdD]ZdeZeeees   PK.e[ ;requests/hooks.pycnu[ abc@s%dZdgZdZdZdS(s requests.hooks ~~~~~~~~~~~~~~ This module provides the capabilities for the Requests hooks system. Available hooks: ``response``: The response generated from a Request. 
tresponsecCstdtDS(Ncss|]}|gfVqdS(N((t.0tevent((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/hooks.pys s(tdicttHOOKS(((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/hooks.pyt default_hooksscKs{|p t}|j|}|rwt|dr?|g}nx5|D]*}|||}|dk rF|}qFqFWn|S(s6Dispatches a hook dictionary on a given piece of data.t__call__N(RtgetthasattrtNone(tkeythookst hook_datatkwargsthookt _hook_data((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/hooks.pyt dispatch_hooks   N(t__doc__RRR(((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/hooks.pyt s  PK.e[E=&&requests/auth.pyonu[ abc@sdZddlZddlZddlZddlZddlZddlZddlmZddl m Z m Z m Z ddl mZddlmZddlmZd Zd Zd Zd efd YZdefdYZdefdYZdefdYZdS(s] requests.auth ~~~~~~~~~~~~~ This module contains the authentication handlers for Requests. iN(t b64encodei(turlparsetstrt basestring(textract_cookies_to_jar(tto_native_string(tparse_dict_headers!application/x-www-form-urlencodedsmultipart/form-datacCst|ts:tjdj|dtt|}nt|tsttjdj|dtt|}nt|tr|jd}nt|tr|jd}ndtt dj ||fj }|S(sReturns a Basic Auth string.sNon-string usernames will no longer be supported in Requests 3.0.0. Please convert the object you've passed in ({0!r}) to a string or bytes object in the near future to avoid problems.tcategorysNon-string passwords will no longer be supported in Requests 3.0.0. 
Please convert the object you've passed in ({0!r}) to a string or bytes object in the near future to avoid problems.tlatin1sBasic t:( t isinstanceRtwarningstwarntformattDeprecationWarningRtencodeRRtjointstrip(tusernametpasswordtauthstr((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyt_basic_auth_strs&   %tAuthBasecBseZdZdZRS(s4Base class that all auth implementations derive fromcCstddS(NsAuth hooks must be callable.(tNotImplementedError(tselftr((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyt__call__Ks(t__name__t __module__t__doc__R(((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyRHst HTTPBasicAuthcBs2eZdZdZdZdZdZRS(s?Attaches HTTP Basic Authentication to the given Request object.cCs||_||_dS(N(RR(RRR((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyt__init__Rs cCs:t|jt|ddk|jt|ddkgS(NRR(tallRtgetattrtNoneR(Rtother((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyt__eq__VscCs ||k S(N((RR#((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyt__ne__\scCs t|j|j|jd<|S(Nt Authorization(RRRtheaders(RR((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyR_s(RRRRR$R%R(((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyROs    t HTTPProxyAuthcBseZdZdZRS(s=Attaches HTTP Proxy Authentication to a given Request object.cCs t|j|j|jd<|S(NsProxy-Authorization(RRRR'(RR((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyRgs(RRRR(((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyR(dstHTTPDigestAuthcBsVeZdZdZdZdZdZdZdZdZ dZ RS( s@Attaches HTTP Digest Authentication to the given Request object.cCs%||_||_tj|_dS(N(RRt threadingtlocalt _thread_local(RRR((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyRos  cCsat|jds]t|j_d|j_d|j_i|j_d|j_d|j_ ndS(Ntinitti( thasattrR,tTrueR-t last_noncet nonce_counttchalR"tpost num_401_calls(R((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pytinit_per_thread_stateus     
csN|jjd}|jjd}|jjjd}|jjjd}|jjjd}d|dkrzd}n |j}|dks|dkrd} | n|d krd } | nfd } dkrdSd} t|} | jp d }| jr+|d | j7}nd|j||j f}d||f}|}|}||jj kr|jj d7_ n d|j_ d|jj }t |jj j d}||j d7}|tjj d7}|tjd7}tj|jd }|dkrJd|||f}n|sl| |d||f}nP|dksd|jdkrd|||d|f}| ||}ndS||j_ d|j||||f}|r|d|7}n|r|d|7}n| r)|d| 7}n|rF|d||f7}nd|S(s :rtype: str trealmtnoncetqopt algorithmtopaquetMD5sMD5-SESScSs4t|tr!|jd}ntj|jS(Nsutf-8(R RRthashlibtmd5t hexdigest(tx((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pytmd5_utf8stSHAcSs4t|tr!|jd}ntj|jS(Nsutf-8(R RRR=tsha1R?(R@((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pytsha_utf8scsd||fS(Ns%s:%s((tstd(t hash_utf8(s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pytR.t/t?s%s:%s:%ss%s:%sis%08xsutf-8iitautht,s%s:%s:%s:%s:%ss>username="%s", realm="%s", nonce="%s", uri="%s", response="%s"s , opaque="%s"s, algorithm="%s"s , digest="%s"s , qop="auth", nc=%s, cnonce="%s"s Digest %sN(R,R3tgetR"tupperRtpathtqueryRRR1R2RRttimetctimetosturandomR=RCR?tsplit(RtmethodturlR7R8R9R:R;t _algorithmRARDtKDtentdigtp_parsedROtA1tA2tHA1tHA2tncvalueREtcnoncetrespdigtnoncebittbase((RGs=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pytbuild_digest_headersr               ! cKs|jrd|j_ndS(s)Reset num_401_calls counter on redirects.iN(t is_redirectR,R5(RRtkwargs((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pythandle_redirects cKsd|jkodkns/d|j_|S|jjd k r]|jjj|jjn|jj dd}d|j kr~|jjdkr~|jjd7_t j dd t j }t|jd|d d|j_|j|j|jj}t|j|j|j|j|j|j|j|j|jd <|jj||}|jj|||_|Sd|j_|S( so Takes the given response and tries digest-auth, if needed. :rtype: requests.Response iiiswww-authenticateR.tdigestisdigest tflagstcountR&N(t status_codeR,R5R4R"trequesttbodytseekR'RMtlowertretcompilet IGNORECASERtsubR3tcontenttclosetcopyRt_cookiestrawtprepare_cookiesReRVRWt connectiontsendthistorytappend(RRRgts_authtpattprept_r((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyt handle_401s.  
$$   cCs|j|jjr8|j|j|j|jds$       ,PK.e[@requests/api.pycnu[ abc@sqdZddlmZdZd dZdZdZd d dZd dZ d d Z d Z d S( s requests.api ~~~~~~~~~~~~ This module implements the Requests API. :copyright: (c) 2012 by Kenneth Reitz. :license: Apache2, see LICENSE for more details. i(tsessionsc Ks2tj }|jd|d||SWdQXdS(s Constructs and sends a :class:`Request `. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary or list of tuples ``[(key, value)]`` (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers to add for the file. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How many seconds to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. 
:param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. :param stream: (optional) if ``False``, the response content will be immediately downloaded. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. :return: :class:`Response ` object :rtype: requests.Response Usage:: >>> import requests >>> req = requests.request('GET', 'http://httpbin.org/get') tmethodturlN(RtSessiontrequest(RRtkwargstsession((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyRs)cKs&|jdttd|d||S(sOSends a GET request. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response tallow_redirectstgettparams(t setdefaulttTrueR(RR R((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyR=s cKs |jdttd||S(sSends an OPTIONS request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response Rtoptions(R R R(RR((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyR Ks cKs |jdttd||S(sSends a HEAD request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response Rthead(R tFalseR(RR((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyR Xs cKstd|d|d||S(sSends a POST request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. 
:param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response tposttdatatjson(R(RRRR((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyRes cKstd|d||S(sSends a PUT request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response tputR(R(RRR((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyRss cKstd|d||S(sSends a PATCH request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response tpatchR(R(RRR((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyRs cKstd||S(sSends a DELETE request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response ` object :rtype: requests.Response tdelete(R(RR((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyRs N( t__doc__tRRtNoneRR R RRRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/requests/api.pyt s -    PK.e[#PRRRrequests/adapters.pynu[# -*- coding: utf-8 -*- """ requests.adapters ~~~~~~~~~~~~~~~~~ This module contains the transport adapters that Requests uses to define and maintain connections. 
""" import os.path import socket from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url from pip._vendor.urllib3.response import HTTPResponse from pip._vendor.urllib3.util import Timeout as TimeoutSauce from pip._vendor.urllib3.util.retry import Retry from pip._vendor.urllib3.exceptions import ClosedPoolError from pip._vendor.urllib3.exceptions import ConnectTimeoutError from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError from pip._vendor.urllib3.exceptions import MaxRetryError from pip._vendor.urllib3.exceptions import NewConnectionError from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError from pip._vendor.urllib3.exceptions import ProtocolError from pip._vendor.urllib3.exceptions import ReadTimeoutError from pip._vendor.urllib3.exceptions import SSLError as _SSLError from pip._vendor.urllib3.exceptions import ResponseError from .models import Response from .compat import urlparse, basestring from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers, prepend_scheme_if_needed, get_auth_from_url, urldefragauth, select_proxy) from .structures import CaseInsensitiveDict from .cookies import extract_cookies_to_jar from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError, ProxyError, RetryError, InvalidSchema) from .auth import _basic_auth_str try: from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager except ImportError: def SOCKSProxyManager(*args, **kwargs): raise InvalidSchema("Missing dependencies for SOCKS support.") DEFAULT_POOLBLOCK = False DEFAULT_POOLSIZE = 10 DEFAULT_RETRIES = 0 DEFAULT_POOL_TIMEOUT = None class BaseAdapter(object): """The Base Transport Adapter""" def __init__(self): super(BaseAdapter, self).__init__() def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. 
:param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. """ raise NotImplementedError def close(self): """Cleans up adapter specific items.""" raise NotImplementedError class HTTPAdapter(BaseAdapter): """The built-in HTTP Adapter for urllib3. Provides a general-case interface for Requests sessions to contact HTTP and HTTPS urls by implementing the Transport Adapter interface. This class will usually be created by the :class:`Session ` class under the covers. :param pool_connections: The number of urllib3 connection pools to cache. :param pool_maxsize: The maximum number of connections to save in the pool. :param max_retries: The maximum number of retries each connection should attempt. Note, this applies only to failed DNS lookups, socket connections and connection timeouts, never to requests where data has made it to the server. By default, Requests does not retry failed connections. If you need granular control over the conditions under which we retry a request, import urllib3's ``Retry`` class and pass that instead. :param pool_block: Whether the connection pool should block for connections. 
Usage:: >>> import requests >>> s = requests.Session() >>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> s.mount('http://', a) """ __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize', '_pool_block'] def __init__(self, pool_connections=DEFAULT_POOLSIZE, pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, pool_block=DEFAULT_POOLBLOCK): if max_retries == DEFAULT_RETRIES: self.max_retries = Retry(0, read=False) else: self.max_retries = Retry.from_int(max_retries) self.config = {} self.proxy_manager = {} super(HTTPAdapter, self).__init__() self._pool_connections = pool_connections self._pool_maxsize = pool_maxsize self._pool_block = pool_block self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) def __getstate__(self): return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__) def __setstate__(self, state): # Can't handle by adding 'proxy_manager' to self.__attrs__ because # self.poolmanager uses a lambda function, which isn't pickleable. self.proxy_manager = {} self.config = {} for attr, value in state.items(): setattr(self, attr, value) self.init_poolmanager(self._pool_connections, self._pool_maxsize, block=self._pool_block) def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs): """Initializes a urllib3 PoolManager. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param connections: The number of urllib3 connection pools to cache. :param maxsize: The maximum number of connections to save in the pool. :param block: Block when no free connections are available. :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. 
""" # save these values for pickling self._pool_connections = connections self._pool_maxsize = maxsize self._pool_block = block self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, block=block, strict=True, **pool_kwargs) def proxy_manager_for(self, proxy, **proxy_kwargs): """Return urllib3 ProxyManager for the given proxy. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param proxy: The proxy to return a urllib3 ProxyManager for. :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. :returns: ProxyManager :rtype: urllib3.ProxyManager """ if proxy in self.proxy_manager: manager = self.proxy_manager[proxy] elif proxy.lower().startswith('socks'): username, password = get_auth_from_url(proxy) manager = self.proxy_manager[proxy] = SOCKSProxyManager( proxy, username=username, password=password, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs ) else: proxy_headers = self.proxy_headers(proxy) manager = self.proxy_manager[proxy] = proxy_from_url( proxy, proxy_headers=proxy_headers, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs) return manager def cert_verify(self, conn, url, verify, cert): """Verify a SSL certificate. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param conn: The urllib3 connection object associated with the cert. :param url: The requested URL. :param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: The SSL certificate to verify. """ if url.lower().startswith('https') and verify: cert_loc = None # Allow self-specified cert location. 
if verify is not True: cert_loc = verify if not cert_loc: cert_loc = DEFAULT_CA_BUNDLE_PATH if not cert_loc or not os.path.exists(cert_loc): raise IOError("Could not find a suitable TLS CA certificate bundle, " "invalid path: {0}".format(cert_loc)) conn.cert_reqs = 'CERT_REQUIRED' if not os.path.isdir(cert_loc): conn.ca_certs = cert_loc else: conn.ca_cert_dir = cert_loc else: conn.cert_reqs = 'CERT_NONE' conn.ca_certs = None conn.ca_cert_dir = None if cert: if not isinstance(cert, basestring): conn.cert_file = cert[0] conn.key_file = cert[1] else: conn.cert_file = cert conn.key_file = None if conn.cert_file and not os.path.exists(conn.cert_file): raise IOError("Could not find the TLS certificate file, " "invalid path: {0}".format(conn.cert_file)) if conn.key_file and not os.path.exists(conn.key_file): raise IOError("Could not find the TLS key file, " "invalid path: {0}".format(conn.key_file)) def build_response(self, req, resp): """Builds a :class:`Response ` object from a urllib3 response. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter ` :param req: The :class:`PreparedRequest ` used to generate the response. :param resp: The urllib3 response object. :rtype: requests.Response """ response = Response() # Fallback to None if there's no status_code, for whatever reason. response.status_code = getattr(resp, 'status', None) # Make headers case-insensitive. response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {})) # Set encoding. response.encoding = get_encoding_from_headers(response.headers) response.raw = resp response.reason = response.raw.reason if isinstance(req.url, bytes): response.url = req.url.decode('utf-8') else: response.url = req.url # Add new cookies from the server. extract_cookies_to_jar(response.cookies, req, resp) # Give the Response some context. 
response.request = req response.connection = self return response def get_connection(self, url, proxies=None): """Returns a urllib3 connection for the given URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param url: The URL to connect to. :param proxies: (optional) A Requests-style dictionary of proxies used on this request. :rtype: urllib3.ConnectionPool """ proxy = select_proxy(url, proxies) if proxy: proxy = prepend_scheme_if_needed(proxy, 'http') proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_url(url) else: # Only scheme should be lower case parsed = urlparse(url) url = parsed.geturl() conn = self.poolmanager.connection_from_url(url) return conn def close(self): """Disposes of any internal state. Currently, this closes the PoolManager and any active ProxyManager, which closes any pooled connections. """ self.poolmanager.clear() for proxy in self.proxy_manager.values(): proxy.clear() def request_url(self, request, proxies): """Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param request: The :class:`PreparedRequest ` being sent. :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. :rtype: str """ proxy = select_proxy(request.url, proxies) scheme = urlparse(request.url).scheme is_proxied_http_request = (proxy and scheme != 'https') using_socks_proxy = False if proxy: proxy_scheme = urlparse(proxy).scheme.lower() using_socks_proxy = proxy_scheme.startswith('socks') url = request.path_url if is_proxied_http_request and not using_socks_proxy: url = urldefragauth(request.url) return url def add_headers(self, request, **kwargs): """Add any headers needed by the connection. 
As of v2.0 this does nothing by default, but is left for overriding by users that subclass the :class:`HTTPAdapter `. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param request: The :class:`PreparedRequest ` to add headers to. :param kwargs: The keyword arguments from the call to send(). """ pass def proxy_headers(self, proxy): """Returns a dictionary of the headers to add to any request sent through a proxy. This works with urllib3 magic to ensure that they are correctly sent to the proxy, rather than in a tunnelled request if CONNECT is being used. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param proxies: The url of the proxy being used for this request. :rtype: dict """ headers = {} username, password = get_auth_from_url(proxy) if username: headers['Proxy-Authorization'] = _basic_auth_str(username, password) return headers def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. 
:rtype: requests.Response """ conn = self.get_connection(request.url, proxies) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers(request) chunked = not (request.body is None or 'Content-Length' in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) except ValueError as e: # this may raise a string formatting error. err = ("Invalid timeout {0}. Pass a (connect, read) " "timeout tuple, or a single float to set " "both timeouts to the same value".format(timeout)) raise ValueError(err) elif isinstance(timeout, TimeoutSauce): pass else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: if not chunked: resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout ) # Send the request. else: if hasattr(conn, 'proxy_pool'): conn = conn.proxy_pool low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) try: low_conn.putrequest(request.method, url, skip_accept_encoding=True) for header, value in request.headers.items(): low_conn.putheader(header, value) low_conn.endheaders() for i in request.body: low_conn.send(hex(len(i))[2:].encode('utf-8')) low_conn.send(b'\r\n') low_conn.send(i) low_conn.send(b'\r\n') low_conn.send(b'0\r\n\r\n') # Receive the response from the server try: # For Python 2.7+ versions, use buffering of HTTP # responses r = low_conn.getresponse(buffering=True) except TypeError: # For compatibility with Python 2.6 versions and back r = low_conn.getresponse() resp = HTTPResponse.from_httplib( r, pool=conn, connection=low_conn, preload_content=False, decode_content=False ) except: # If we hit any problems here, clean up the connection. # Then, reraise so that we can handle the actual exception. 
low_conn.close() raise except (ProtocolError, socket.error) as err: raise ConnectionError(err, request=request) except MaxRetryError as e: if isinstance(e.reason, ConnectTimeoutError): # TODO: Remove this in 3.0.0: see #2811 if not isinstance(e.reason, NewConnectionError): raise ConnectTimeout(e, request=request) if isinstance(e.reason, ResponseError): raise RetryError(e, request=request) if isinstance(e.reason, _ProxyError): raise ProxyError(e, request=request) if isinstance(e.reason, _SSLError): # This branch is for urllib3 v1.22 and later. raise SSLError(e, request=request) raise ConnectionError(e, request=request) except ClosedPoolError as e: raise ConnectionError(e, request=request) except _ProxyError as e: raise ProxyError(e) except (_SSLError, _HTTPError) as e: if isinstance(e, _SSLError): # This branch is for urllib3 versions earlier than v1.22 raise SSLError(e, request=request) elif isinstance(e, ReadTimeoutError): raise ReadTimeout(e, request=request) else: raise return self.build_response(request, resp) PK.e[)jjrequests/certs.pyonu[ abc@s1dZddlmZedkr-eGHndS(sF requests.certs ~~~~~~~~~~~~~~ This module returns the preferred default CA certificate bundle. There is only one — the one from the certifi package. If you are packaging Requests, e.g., for a Linux distribution or a managed environment, you can change the definition of where() to return a separately packaged CA bundle. i(twheret__main__N(t__doc__tpip._vendor.certifiRt__name__(((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/certs.pyts PK.e[urequests/_internal_utils.pycnu[ abc@s;dZddlmZmZmZddZdZdS(s requests._internal_utils ~~~~~~~~~~~~~~ Provides utility functions that are consumed internally by Requests which depend on extremely few external helpers (such as compat) i(tis_py2t builtin_strtstrtasciicCsCt|tr|}n'tr0|j|}n|j|}|S(sGiven a string object, regardless of type, returns a representation of that string in the native string type, encoding and decoding where necessary. 
This assumes ASCII unless told otherwise. (t isinstanceRRtencodetdecode(tstringtencodingtout((sH/usr/lib/python2.7/site-packages/pip/_vendor/requests/_internal_utils.pytto_native_strings  cCsCt|tsty|jdtSWntk r>tSXdS(sDetermine if unicode string only contains ASCII characters. :param str u_string: unicode string to check. Must be unicode and not Python 2 `str`. :rtype: bool RN(RRtAssertionErrorRtTruetUnicodeEncodeErrortFalse(tu_string((sH/usr/lib/python2.7/site-packages/pip/_vendor/requests/_internal_utils.pytunicode_is_asciis   N(t__doc__tcompatRRRR R(((sH/usr/lib/python2.7/site-packages/pip/_vendor/requests/_internal_utils.pyt s PK.e[fƴ^/l/lrequests/utils.pynu[# -*- coding: utf-8 -*- """ requests.utils ~~~~~~~~~~~~~~ This module provides utility functions that are used within Requests that are also useful for external consumption. """ import cgi import codecs import collections import contextlib import io import os import platform import re import socket import struct import warnings from .__version__ import __version__ from . 
import certs # to_native_string is unused here, but imported here for backwards compatibility from ._internal_utils import to_native_string from .compat import parse_http_list as _parse_list_header from .compat import ( quote, urlparse, bytes, str, OrderedDict, unquote, getproxies, proxy_bypass, urlunparse, basestring, integer_types, is_py3, proxy_bypass_environment, getproxies_environment) from .cookies import cookiejar_from_dict from .structures import CaseInsensitiveDict from .exceptions import ( InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError) NETRC_FILES = ('.netrc', '_netrc') DEFAULT_CA_BUNDLE_PATH = certs.where() DEFAULT_PORTS = {'http': 80, 'https': 443} if platform.system() == 'Windows': # provide a proxy_bypass version on Windows without DNS lookups def proxy_bypass_registry(host): if is_py3: import winreg else: import _winreg as winreg try: internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') proxyEnable = winreg.QueryValueEx(internetSettings, 'ProxyEnable')[0] proxyOverride = winreg.QueryValueEx(internetSettings, 'ProxyOverride')[0] except OSError: return False if not proxyEnable or not proxyOverride: return False # make a check value list from the registry entry: replace the # '' string by the localhost entry and the corresponding # canonical entry. proxyOverride = proxyOverride.split(';') # now check if we match one of the registry values. for test in proxyOverride: if test == '': if '.' not in host: return True test = test.replace(".", r"\.") # mask dots test = test.replace("*", r".*") # change glob sequence test = test.replace("?", r".") # change glob char if re.match(test, host, re.I): return True return False def proxy_bypass(host): # noqa """Return True, if the host should be bypassed. Checks proxy settings gathered from the environment, if specified, or the registry. 
""" if getproxies_environment(): return proxy_bypass_environment(host) else: return proxy_bypass_registry(host) def dict_to_sequence(d): """Returns an internal sequence dictionary update.""" if hasattr(d, 'items'): d = d.items() return d def super_len(o): total_length = None current_position = 0 if hasattr(o, '__len__'): total_length = len(o) elif hasattr(o, 'len'): total_length = o.len elif hasattr(o, 'fileno'): try: fileno = o.fileno() except io.UnsupportedOperation: pass else: total_length = os.fstat(fileno).st_size # Having used fstat to determine the file length, we need to # confirm that this file was opened up in binary mode. if 'b' not in o.mode: warnings.warn(( "Requests has determined the content-length for this " "request using the binary size of the file: however, the " "file has been opened in text mode (i.e. without the 'b' " "flag in the mode). This may lead to an incorrect " "content-length. In Requests 3.0, support will be removed " "for files in text mode."), FileModeWarning ) if hasattr(o, 'tell'): try: current_position = o.tell() except (OSError, IOError): # This can happen in some weird situations, such as when the file # is actually a special file descriptor like stdin. In this # instance, we don't know what the length is, so set it to zero and # let requests chunk it instead. 
if total_length is not None: current_position = total_length else: if hasattr(o, 'seek') and total_length is None: # StringIO and BytesIO have seek but no useable fileno try: # seek to end of file o.seek(0, 2) total_length = o.tell() # seek back to current position to support # partially read file-like objects o.seek(current_position or 0) except (OSError, IOError): total_length = 0 if total_length is None: total_length = 0 return max(0, total_length - current_position) def get_netrc_auth(url, raise_errors=False): """Returns the Requests tuple auth for a given url from netrc.""" try: from netrc import netrc, NetrcParseError netrc_path = None for f in NETRC_FILES: try: loc = os.path.expanduser('~/{0}'.format(f)) except KeyError: # os.path.expanduser can fail when $HOME is undefined and # getpwuid fails. See http://bugs.python.org/issue20164 & # https://github.com/requests/requests/issues/1846 return if os.path.exists(loc): netrc_path = loc break # Abort early if there isn't one. if netrc_path is None: return ri = urlparse(url) # Strip port numbers from netloc. This weird `if...encode`` dance is # used for Python 3.2, which doesn't support unicode literals. splitstr = b':' if isinstance(url, str): splitstr = splitstr.decode('ascii') host = ri.netloc.split(splitstr)[0] try: _netrc = netrc(netrc_path).authenticators(host) if _netrc: # Return with login / password login_i = (0 if _netrc[0] else 1) return (_netrc[login_i], _netrc[2]) except (NetrcParseError, IOError): # If there was a parsing error or a permissions issue reading the file, # we'll just skip netrc auth unless explicitly asked to raise errors. if raise_errors: raise # AppEngine hackiness. 
except (ImportError, AttributeError): pass def guess_filename(obj): """Tries to guess the filename of the given object.""" name = getattr(obj, 'name', None) if (name and isinstance(name, basestring) and name[0] != '<' and name[-1] != '>'): return os.path.basename(name) def from_key_val_list(value): """Take an object and test to see if it can be represented as a dictionary. Unless it can not be represented as such, return an OrderedDict, e.g., :: >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') ValueError: need more than 1 value to unpack >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) :rtype: OrderedDict """ if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') return OrderedDict(value) def to_key_val_list(value): """Take an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_list([('key', 'val')]) [('key', 'val')] >>> to_key_val_list({'key': 'val'}) [('key', 'val')] >>> to_key_val_list('string') ValueError: cannot encode objects that are not 2-tuples. :rtype: list """ if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') if isinstance(value, collections.Mapping): value = value.items() return list(value) # From mitsuhiko/werkzeug (used with permission). def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. It basically works like :func:`parse_set_header` just that items may appear multiple times and case sensitivity is preserved. 
The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] To create a header from the :class:`list` again, use the :func:`dump_header` function. :param value: a string with a list header. :return: :class:`list` :rtype: list """ result = [] for item in _parse_list_header(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result # From mitsuhiko/werkzeug (used with permission). def parse_dict_header(value): """Parse lists of key, value pairs as described by RFC 2068 Section 2 and convert them into a python dict: >>> d = parse_dict_header('foo="is a fish", bar="as well"') >>> type(d) is dict True >>> sorted(d.items()) [('bar', 'as well'), ('foo', 'is a fish')] If there is no value for a key it will be `None`: >>> parse_dict_header('key_without_value') {'key_without_value': None} To create a header from the :class:`dict` again, use the :func:`dump_header` function. :param value: a string with a dict header. :return: :class:`dict` :rtype: dict """ result = {} for item in _parse_list_header(value): if '=' not in item: result[item] = None continue name, value = item.split('=', 1) if value[:1] == value[-1:] == '"': value = unquote_header_value(value[1:-1]) result[name] = value return result # From mitsuhiko/werkzeug (used with permission). def unquote_header_value(value, is_filename=False): r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. :rtype: str """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. 
IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] # if this is a filename and the starting characters look like # a UNC path, then just return the value without quotes. Using the # replace sequence below on a UNC path has the effect of turning # the leading double slash into a single slash and then # _fix_ie_filename() doesn't work correctly. See #458. if not is_filename or value[:2] != '\\\\': return value.replace('\\\\', '\\').replace('\\"', '"') return value def dict_from_cookiejar(cj): """Returns a key/value dictionary from a CookieJar. :param cj: CookieJar object to extract cookies from. :rtype: dict """ cookie_dict = {} for cookie in cj: cookie_dict[cookie.name] = cookie.value return cookie_dict def add_dict_to_cookiejar(cj, cookie_dict): """Returns a CookieJar from a key/value dictionary. :param cj: CookieJar to insert cookies into. :param cookie_dict: Dict of key/values to insert into CookieJar. :rtype: CookieJar """ return cookiejar_from_dict(cookie_dict, cj) def get_encodings_from_content(content): """Returns encodings from given content string. :param content: bytestring to extract encodings from. """ warnings.warn(( 'In requests 3.0, get_encodings_from_content will be removed. For ' 'more information, please see the discussion on issue #2266. (This' ' warning should only appear once.)'), DeprecationWarning) charset_re = re.compile(r']', flags=re.I) pragma_re = re.compile(r']', flags=re.I) xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') return (charset_re.findall(content) + pragma_re.findall(content) + xml_re.findall(content)) def get_encoding_from_headers(headers): """Returns encodings from given HTTP Header Dict. :param headers: dictionary to extract encoding from. 
:rtype: str """ content_type = headers.get('content-type') if not content_type: return None content_type, params = cgi.parse_header(content_type) if 'charset' in params: return params['charset'].strip("'\"") if 'text' in content_type: return 'ISO-8859-1' def stream_decode_response_unicode(iterator, r): """Stream decodes a iterator.""" if r.encoding is None: for item in iterator: yield item return decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace') for chunk in iterator: rv = decoder.decode(chunk) if rv: yield rv rv = decoder.decode(b'', final=True) if rv: yield rv def iter_slices(string, slice_length): """Iterate over slices of a string.""" pos = 0 if slice_length is None or slice_length <= 0: slice_length = len(string) while pos < len(string): yield string[pos:pos + slice_length] pos += slice_length def get_unicode_from_response(r): """Returns the requested content back in unicode. :param r: Response object to get unicode content from. Tried: 1. charset from content-type 2. fall back and replace all unicode characters :rtype: str """ warnings.warn(( 'In requests 3.0, get_unicode_from_response will be removed. For ' 'more information, please see the discussion on issue #2266. (This' ' warning should only appear once.)'), DeprecationWarning) tried_encodings = [] # Try charset from content-type encoding = get_encoding_from_headers(r.headers) if encoding: try: return str(r.content, encoding) except UnicodeError: tried_encodings.append(encoding) # Fall back: try: return str(r.content, encoding, errors='replace') except TypeError: return r.content # The unreserved URI characters (RFC 3986) UNRESERVED_SET = frozenset( "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~") def unquote_unreserved(uri): """Un-escape any percent-escape sequences in a URI that are unreserved characters. This leaves all reserved, illegal and non-ASCII bytes encoded. 
:rtype: str """ parts = uri.split('%') for i in range(1, len(parts)): h = parts[i][0:2] if len(h) == 2 and h.isalnum(): try: c = chr(int(h, 16)) except ValueError: raise InvalidURL("Invalid percent-escape sequence: '%s'" % h) if c in UNRESERVED_SET: parts[i] = c + parts[i][2:] else: parts[i] = '%' + parts[i] else: parts[i] = '%' + parts[i] return ''.join(parts) def requote_uri(uri): """Re-quote the given URI. This function passes the given URI through an unquote/quote cycle to ensure that it is fully and consistently quoted. :rtype: str """ safe_with_percent = "!#$%&'()*+,/:;=?@[]~" safe_without_percent = "!#$&'()*+,/:;=?@[]~" try: # Unquote only the unreserved characters # Then quote only illegal characters (do not quote reserved, # unreserved, or '%') return quote(unquote_unreserved(uri), safe=safe_with_percent) except InvalidURL: # We couldn't unquote the given URI, so let's try quoting it, but # there may be unquoted '%'s in the URI. We need to make sure they're # properly quoted so they do not cause issues elsewhere. 
return quote(uri, safe=safe_without_percent) def address_in_network(ip, net): """This function allows you to check if an IP belongs to a network subnet Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 :rtype: bool """ ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0] netaddr, bits = net.split('/') netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0] network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask return (ipaddr & netmask) == (network & netmask) def dotted_netmask(mask): """Converts mask from /xx format to xxx.xxx.xxx.xxx Example: if mask is 24 function returns 255.255.255.0 :rtype: str """ bits = 0xffffffff ^ (1 << 32 - mask) - 1 return socket.inet_ntoa(struct.pack('>I', bits)) def is_ipv4_address(string_ip): """ :rtype: bool """ try: socket.inet_aton(string_ip) except socket.error: return False return True def is_valid_cidr(string_network): """ Very simple check of the cidr format in no_proxy variable. :rtype: bool """ if string_network.count('/') == 1: try: mask = int(string_network.split('/')[1]) except ValueError: return False if mask < 1 or mask > 32: return False try: socket.inet_aton(string_network.split('/')[0]) except socket.error: return False else: return False return True @contextlib.contextmanager def set_environ(env_name, value): """Set the environment variable 'env_name' to 'value' Save previous value, yield, and then restore the previous value stored in the environment variable 'env_name'. If 'value' is None, do nothing""" value_changed = value is not None if value_changed: old_value = os.environ.get(env_name) os.environ[env_name] = value try: yield finally: if value_changed: if old_value is None: del os.environ[env_name] else: os.environ[env_name] = old_value def should_bypass_proxies(url, no_proxy): """ Returns whether we should bypass proxies or not. 
:rtype: bool """ get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) # First check whether no_proxy is defined. If it is, check that the URL # we're getting isn't in the no_proxy list. no_proxy_arg = no_proxy if no_proxy is None: no_proxy = get_proxy('no_proxy') netloc = urlparse(url).netloc if no_proxy: # We need to check whether we match here. We need to see if we match # the end of the netloc, both with and without the port. no_proxy = ( host for host in no_proxy.replace(' ', '').split(',') if host ) ip = netloc.split(':')[0] if is_ipv4_address(ip): for proxy_ip in no_proxy: if is_valid_cidr(proxy_ip): if address_in_network(ip, proxy_ip): return True elif ip == proxy_ip: # If no_proxy ip was defined in plain IP notation instead of cidr notation & # matches the IP of the index return True else: for host in no_proxy: if netloc.endswith(host) or netloc.split(':')[0].endswith(host): # The URL does match something in no_proxy, so we don't want # to apply the proxies on this URL. return True # If the system proxy settings indicate that this URL should be bypassed, # don't proxy. # The proxy_bypass function is incredibly buggy on OS X in early versions # of Python 2.6, so allow this call to fail. Only catch the specific # exceptions we've seen, though: this call failing in other ways can reveal # legitimate problems. with set_environ('no_proxy', no_proxy_arg): try: bypass = proxy_bypass(netloc) except (TypeError, socket.gaierror): bypass = False if bypass: return True return False def get_environ_proxies(url, no_proxy=None): """ Return a dict of environment proxies. :rtype: dict """ if should_bypass_proxies(url, no_proxy=no_proxy): return {} else: return getproxies() def select_proxy(url, proxies): """Select a proxy for the url, if applicable. 
:param url: The url being for the request :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs """ proxies = proxies or {} urlparts = urlparse(url) if urlparts.hostname is None: return proxies.get(urlparts.scheme, proxies.get('all')) proxy_keys = [ urlparts.scheme + '://' + urlparts.hostname, urlparts.scheme, 'all://' + urlparts.hostname, 'all', ] proxy = None for proxy_key in proxy_keys: if proxy_key in proxies: proxy = proxies[proxy_key] break return proxy def default_user_agent(name="python-requests"): """ Return a string representing the default user agent. :rtype: str """ return '%s/%s' % (name, __version__) def default_headers(): """ :rtype: requests.structures.CaseInsensitiveDict """ return CaseInsensitiveDict({ 'User-Agent': default_user_agent(), 'Accept-Encoding': ', '.join(('gzip', 'deflate')), 'Accept': '*/*', 'Connection': 'keep-alive', }) def parse_header_links(value): """Return a dict of parsed link headers proxies. i.e. Link: ; rel=front; type="image/jpeg",; rel=back;type="image/jpeg" :rtype: list """ links = [] replace_chars = ' \'"' for val in re.split(', *<', value): try: url, params = val.split(';', 1) except ValueError: url, params = val, '' link = {'url': url.strip('<> \'"')} for param in params.split(';'): try: key, value = param.split('=') except ValueError: break link[key.strip(replace_chars)] = value.strip(replace_chars) links.append(link) return links # Null bytes; no need to recreate these on each call to guess_json_utf _null = '\x00'.encode('ascii') # encoding to ASCII for Python 3 _null2 = _null * 2 _null3 = _null * 3 def guess_json_utf(data): """ :rtype: str """ # JSON always starts with two ASCII characters, so detection is as # easy as counting the nulls and from their location and count # determine the encoding. Also detect a BOM, if present. 
sample = data[:4] if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): return 'utf-32' # BOM included if sample[:3] == codecs.BOM_UTF8: return 'utf-8-sig' # BOM included, MS style (discouraged) if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): return 'utf-16' # BOM included nullcount = sample.count(_null) if nullcount == 0: return 'utf-8' if nullcount == 2: if sample[::2] == _null2: # 1st and 3rd are null return 'utf-16-be' if sample[1::2] == _null2: # 2nd and 4th are null return 'utf-16-le' # Did not detect 2 valid UTF-16 ascii-range characters if nullcount == 3: if sample[:3] == _null3: return 'utf-32-be' if sample[1:] == _null3: return 'utf-32-le' # Did not detect a valid UTF-32 ascii-range character return None def prepend_scheme_if_needed(url, new_scheme): """Given a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument. :rtype: str """ scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) # urlparse is a finicky beast, and sometimes decides that there isn't a # netloc present. Assume that it's being over-cautious, and switch netloc # and path if urlparse decided there was no netloc. if not netloc: netloc, path = path, netloc return urlunparse((scheme, netloc, path, params, query, fragment)) def get_auth_from_url(url): """Given a url with authentication components, extract them into a tuple of username,password. :rtype: (str,str) """ parsed = urlparse(url) try: auth = (unquote(parsed.username), unquote(parsed.password)) except (AttributeError, TypeError): auth = ('', '') return auth # Moved outside of function to avoid recompile every call _CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$') _CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$') def check_header_validity(header): """Verifies that header value is a string which doesn't contain leading whitespace or return characters. This prevents unintended header injection. 
:param header: tuple, in the format (name, value). """ name, value = header if isinstance(value, bytes): pat = _CLEAN_HEADER_REGEX_BYTE else: pat = _CLEAN_HEADER_REGEX_STR try: if not pat.match(value): raise InvalidHeader("Invalid return character or leading space in header: %s" % name) except TypeError: raise InvalidHeader("Value for header {%s: %s} must be of type str or " "bytes, not %s" % (name, value, type(value))) def urldefragauth(url): """ Given a url remove the fragment and the authentication part. :rtype: str """ scheme, netloc, path, params, query, fragment = urlparse(url) # see func:`prepend_scheme_if_needed` if not netloc: netloc, path = path, netloc netloc = netloc.rsplit('@', 1)[-1] return urlunparse((scheme, netloc, path, params, query, '')) def rewind_body(prepared_request): """Move file pointer back to its recorded starting position so it can be read again on redirect. """ body_seek = getattr(prepared_request.body, 'seek', None) if body_seek is not None and isinstance(prepared_request._body_position, integer_types): try: body_seek(prepared_request._body_position) except (IOError, OSError): raise UnrewindableBodyError("An error occurred when rewinding request " "body for redirect.") else: raise UnrewindableBodyError("Unable to rewind request body for redirect.") PK.e[yڣrequests/hooks.pynu[# -*- coding: utf-8 -*- """ requests.hooks ~~~~~~~~~~~~~~ This module provides the capabilities for the Requests hooks system. Available hooks: ``response``: The response generated from a Request. 
""" HOOKS = ['response'] def default_hooks(): return dict((event, []) for event in HOOKS) # TODO: response is the only one def dispatch_hook(key, hooks, hook_data, **kwargs): """Dispatches a hook dictionary on a given piece of data.""" hooks = hooks or dict() hooks = hooks.get(key) if hooks: if hasattr(hooks, '__call__'): hooks = [hooks] for hook in hooks: _hook_data = hook(hook_data, **kwargs) if _hook_data is not None: hook_data = _hook_data return hook_data PK.e[)jjrequests/certs.pycnu[ abc@s1dZddlmZedkr-eGHndS(sF requests.certs ~~~~~~~~~~~~~~ This module returns the preferred default CA certificate bundle. There is only one — the one from the certifi package. If you are packaging Requests, e.g., for a Linux distribution or a managed environment, you can change the definition of where() to return a separately packaged CA bundle. i(twheret__main__N(t__doc__tpip._vendor.certifiRt__name__(((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/certs.pyts PK.e[wrrrequests/models.pycnu[ abc@sdZddlZddlZddlZddlZddlmZddlm Z ddl m Z ddl m Z mZmZmZddlmZdd lmZdd lmZdd lmZdd lmZmZmZdd lmZmZm Z m!Z!m"Z"m#Z#m$Z$ddl%m&Z&m'Z'ddl(m)Z)m*Z*m+Z+m,Z,m-Z-m.Z.m/Z/m0Z0m1Z1m2Z2ddl3m4Z4m5Z5m6Z6m7Z7m8Z8m9Z9m:Z:m;Z;m<Z<m=Z=ddl3m>Z?ddl@mAZAeAjBeAjCeAjDeAjEeAjFfZGdZHddZIdZJdeKfdYZLdeKfdYZMdeMfdYZNdeLeMfdYZOdeKfd YZPdS(!s` requests.models ~~~~~~~~~~~~~~~ This module contains the primary objects that power Requests. 
iN(t RequestField(tencode_multipart_formdata(t parse_url(t DecodeErrortReadTimeoutErrort ProtocolErrortLocationParseError(tUnsupportedOperationi(t default_hooks(tCaseInsensitiveDict(t HTTPBasicAuth(tcookiejar_from_dicttget_cookie_headert_copy_cookie_jar(t HTTPErrort MissingSchemat InvalidURLtChunkedEncodingErrortContentDecodingErrortConnectionErrortStreamConsumedError(tto_native_stringtunicode_is_ascii( tguess_filenametget_auth_from_urlt requote_uritstream_decode_response_unicodetto_key_val_listtparse_header_linkst iter_slicestguess_json_utft super_lentcheck_header_validity( t cookielibt urlunparseturlsplitt urlencodetstrtbytestis_py2tchardett builtin_strt basestring(tjson(tcodesii iitRequestEncodingMixincBs5eZedZedZedZRS(cCssg}t|j}|j}|s-d}n|j||j}|rf|jd|j|ndj|S(sBuild the path URL to use.t/t?t(R#turltpathtappendtquerytjoin(tselfR1tpR2R4((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytpath_url=s     cCst|ttfr|St|dr,|St|drg}xt|D]\}}t|tsyt|d r|g}nxl|D]d}|dk r|jt|tr|jdn|t|tr|jdn|fqqWqNWt |dt S|SdS(sEncode parameters in a piece of data. Will successfully encode parameters when passed as a dict or a list of 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary if parameters are supplied as a dict. treadt__iter__sutf-8tdoseqN( t isinstanceR%R&thasattrRR*tNoneR3tencodeR$tTrue(tdatatresulttktvstv((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt_encode_paramsRs    !3c Cs]|stdnt|tr3tdng}t|pEi}t|pWi}x|D]\}}t|tst|d r|g}nx|D]}|d k rt|tst|}n|jt|tr|j dn|t|tr|j dn|fqqWqdWx|D] \}}d }d } t|t t frt |dkr|\} } qt |dkr|\} } }q|\} } }} nt|p|} |} t| tttfr| } n | j} td|d| d | d | } | jd ||j| q3Wt|\}}||fS( sBuild the body for a multipart/form-data request. Will successfully encode files when passed as a dict or a list of tuples. Order is retained if data is a list of tuples but arbitrary if parameters are supplied as a dict. 
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) or 4-tuples (filename, fileobj, contentype, custom_headers). sFiles must be provided.sData must not be a string.R:sutf-8iitnameRAtfilenametheaderst content_typeN(t ValueErrorR<R*RR=R>R&R%R3tdecodeR?ttupletlisttlenRt bytearrayR9Rtmake_multipartR(tfilesRAt new_fieldstfieldstfieldtvalRERCtfttfhtfntfptfdatatrftbodyRJ((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt _encode_filesmsH    !3  !(t__name__t __module__tpropertyR8t staticmethodRFR^(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR-<stRequestHooksMixincBseZdZdZRS(cCs||jkr"td|nt|tjrK|j|j|n0t|dr{|j|jd|DndS(sProperly register a hook.s1Unsupported event specified, with event name "%s"R:css'|]}t|tjr|VqdS(N(R<t collectionstCallable(t.0th((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pys sN(thooksRKR<RdReR3R=textend(R6teventthook((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt register_hooks cCs5y|j|j|tSWntk r0tSXdS(siDeregister a previously registered hook. Returns True if the hook existed, False if not. N(RhtremoveR@RKtFalse(R6RjRk((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytderegister_hooks  (R_R`RlRo(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRcs tRequestc BsGeZdZddddddddddd ZdZdZRS(sA user-created :class:`Request ` object. Used to prepare a :class:`PreparedRequest `, which is sent to the server. :param method: HTTP method to use. :param url: URL to send. :param headers: dictionary of headers to send. :param files: dictionary of {filename: fileobject} files to multipart upload. :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place. :param json: json for the body to attach to the request (if files or data is not specified). :param params: dictionary of URL parameters to append to the URL. :param auth: Auth handler or (user, pass) tuple. 
:param cookies: dictionary or CookieJar of cookies to attach to this request. :param hooks: dictionary of callback hooks, for internal usage. Usage:: >>> import requests >>> req = requests.Request('GET', 'http://httpbin.org/get') >>> req.prepare() c Cs|dkrgn|}|dkr*gn|}|dkrBin|}|dkrZin|}| dkrrin| } t|_x6t| jD]"\} } |jd| d| qW||_||_||_||_ ||_ | |_ ||_ ||_ ||_dS(NRjRk(R>RRhRNtitemsRltmethodR1RIRRRAR+tparamstauthtcookies( R6RrR1RIRRRARsRtRuRhR+RCRE((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt__init__s"         cCs d|jS(Ns(Rr(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt__repr__scCsqt}|jd|jd|jd|jd|jd|jd|jd|jd|j d |j d |j |S( sXConstructs a :class:`PreparedRequest ` for transmission and returns it.RrR1RIRRRAR+RsRtRuRh( tPreparedRequesttprepareRrR1RIRRRAR+RsRtRuRh(R6R7((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRys            N(R_R`t__doc__R>RvRwRy(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRps  Rxc BseZdZdZddddddddddd ZdZdZdZe dZ dZ dZ dd Z d Zd d Zd ZdZRS(sThe fully mutable :class:`PreparedRequest ` object, containing the exact bytes that will be sent to the server. Generated from either a :class:`Request ` object or manually. 
Usage:: >>> import requests >>> req = requests.Request('GET', 'http://httpbin.org/get') >>> r = req.prepare() >>> s = requests.Session() >>> s.send(r) cCsFd|_d|_d|_d|_d|_t|_d|_dS(N( R>RrR1RIt_cookiesR]RRht_body_position(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRvs      c Csk|j||j|||j||j||j||| |j|||j| dS(s6Prepares the entire request with the given parameters.N(tprepare_methodt prepare_urltprepare_headerstprepare_cookiest prepare_bodyt prepare_autht prepare_hooks( R6RrR1RIRRRARsRtRuRhR+((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRy+s   cCs d|jS(Ns(Rr(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRw=scCst}|j|_|j|_|jdk r?|jjnd|_t|j|_|j|_|j |_ |j |_ |S(N( RxRrR1RIR>tcopyR R{R]RhR|(R6R7((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR@s   '   cCs7||_|jdk r3t|jj|_ndS(sPrepares the given HTTP method.N(RrR>Rtupper(R6Rr((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR}Ks cCsOddl}y"|j|dtjd}Wn|jk rJtnX|S(Nituts46sutf-8(tidnaR?R@RLt IDNAErrort UnicodeError(thostR((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt_get_idna_encoded_hostQs  " cCst|tr!|jd}ntr3t|n t|}|j}d|krz|jjd rz||_ dSy%t |\}}}}}}} Wn"t k r} t | j nX|sd} | jt|d} t| n|st d|nt|sRy|j|}Wqptk rNt dqpXn|jdrpt dn|pyd } | r| d 7} n| |7} |r| dt|7} n|sd }ntrst|tr|jd }nt| tr | jd } nt|tr.|jd }nt|trO|jd }nt| trs| jd } qsnt|ttfrt|}n|j|} | r|rd || f}q| }ntt|| |d|| g}||_ dS(sPrepares the given HTTP URL.tutf8t:thttpNsDInvalid URL {0!r}: No schema supplied. 
Perhaps you meant http://{0}?s Invalid URL %r: No host suppliedsURL has an invalid label.u*R0t@R.sutf-8s%s&%s(R<R&RLR'tunicodeR%tlstriptlowert startswithR1RRRtargstformatRRRRRR?RFRR"R>(R6R1RstschemeRtRtportR2R4tfragmentteterrortnetloct enc_params((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR~[sh " %       $cCsYt|_|rUx@|jD]/}t||\}}||jt|t complexjsontdumpsR<R&R?tallR=R*RNRMRdtMappingRt TypeErrortAttributeErrorRtgetattrRR|tIOErrortOSErrortobjecttNotImplementedErrorR)RIR^RFtprepare_content_lengthR](R6RARRR+R]RJt is_streamtlength((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRsJ %    cCsr|dk r7t|}|rnt||jdPrepare Content-Length header based on request method and bodysContent-LengthtGETtHEADt0N(RR(R>RR)RIRrtget(R6R]R((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRs   'R0cCs|dkr6t|j}t|r-|nd}n|rt|trlt|dkrlt|}n||}|jj |j|j |j ndS(s"Prepares the given HTTP auth data.iN( R>RR1tanyR<RMROR t__dict__tupdateRR](R6RtR1turl_authtr((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRs ! cCs_t|tjr||_nt||_t|j|}|dk r[||jd` object. Any subsequent calls to ``prepare_cookies`` will have no actual effect, unless the "Cookie" header is removed beforehand. tCookieN(R<R!t CookieJarR{R R R>RI(R6Rut cookie_header((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR$s   cCs5|p g}x"|D]}|j|||qWdS(sPrepares the given hooks.N(Rl(R6RhRj((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR8s  N(R_R`RzRvR>RyRwRR}RbRR~RRRRRR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRxs    V E  tResponsec Bs7eZdZddddddddd d g Zd Zd Zd ZdZdZdZ dZ dZ dZ e dZe dZe dZe dZe dZdedZed"d"dZe dZe dZdZe dZd Zd!ZRS(#shThe :class:`Response ` object, which contains a server's response to an HTTP request. 
t_contentt status_codeRIR1thistorytencodingtreasonRutelapsedtrequestcCst|_t|_d|_d|_t|_d|_d|_ d|_ g|_ d|_ t i|_tjd|_d|_dS(Ni(RnRt_content_consumedR>t_nextRR RItrawR1RRRR Rutdatetimet timedeltaRR(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRvLs          cCs|S(N((R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt __enter__{scGs|jdS(N(tclose(R6R((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt__exit__~scs0jsjntfdjDS(Nc3s'|]}|t|dfVqdS(N(RR>(Rftattr(R6(s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pys s(Rtcontenttdictt __attrs__(R6((R6s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt __getstate__s    cCsQx*|jD]\}}t|||q Wt|dtt|dddS(NRR(RqtsetattrR@R>(R6tstateRGR((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt __setstate__scCs d|jS(Ns(R(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRwscCs|jS(skReturns True if :attr:`status_code` is less than 400. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code, is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. (tok(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt__bool__scCs|jS(skReturns True if :attr:`status_code` is less than 400. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code, is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. (R(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt __nonzero__scCs |jdS(s,Allows you to use a response as an iterator.i(t iter_content(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR:scCs'y|jWntk r"tSXtS(skReturns True if :attr:`status_code` is less than 400. 
This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code, is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. (traise_for_statusRRnR@(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRs  cCsd|jko|jtkS(sTrue if this Response is a well-formed HTTP redirect that could have been processed automatically (by :meth:`Session.resolve_redirects`). tlocation(RIRtREDIRECT_STATI(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt is_redirectscCs(d|jko'|jtjtjfkS(s@True if this Response one of the permanent versions of redirect.R(RIRR,tmoved_permanentlytpermanent_redirect(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytis_permanent_redirectscCs|jS(sTReturns a PreparedRequest for the next request in a redirect chain, if there is one.(R(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytnextscCstj|jdS(s7The apparent encoding, provided by the chardet library.R(R(tdetectR(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytapparent_encodingsicsfd}jr9tjtr9tn5dk rntt rntdtnt j}|}jr|n|}|rt |}n|S(sIterates over the response data. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. The chunk size is the number of bytes it should read into memory. This is not necessarily the length of each item returned as decoding can take place. chunk_size must be of type int or None. A value of None will function differently depending on the value of `stream`. stream=True will read data as it arrives in whatever size the chunks are received. If stream=False, data is returned as a single chunk. If decode_unicode is True, content will be decoded using the best available encoding based on the response. 
c3stjdry,x%jjdtD] }|Vq.WWqtk r_}t|qtk r}}t|qtk r}t |qXn.x+trjj }|sPn|VqWt_ dS(Ntstreamtdecode_content( R=RRR@RRRRRRR9R(tchunkR(t chunk_sizeR6(s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytgenerates    s.chunk_size must be an int, it is instead a %s.N( RR<RtboolRR>tintRttypeRR(R6Rtdecode_unicodeRt reused_chunkst stream_chunkstchunks((RR6s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRs  ccsd}x|jd|d|D]}|dk r>||}n|rV|j|}n |j}|r|dr|r|dd|dkr|j}nd}x|D] }|VqWqW|dk r|VndS(sIterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. .. note:: This method is not reentrant safe. RRiN(R>Rtsplitt splitlinestpop(R6RRt delimitertpendingRtlinestline((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyt iter_lines s   .   cCs|jtkr{|jr'tdn|jdksE|jdkrQd|_q{tj|j t prt|_nt |_|jS(s"Content of the response, in bytes.s2The content for this response was already consumediN( RRnRt RuntimeErrorRRR>R&R5RtCONTENT_CHUNK_SIZER@(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR*s   * cCsd}|j}|js"tdS|jdkr=|j}nyt|j|dd}Wn,ttfk rt|jdd}nX|S(sContent of the response, in unicode. If Response.encoding is None, encoding will be guessed using ``chardet``. The encoding of the response content is determined based solely on HTTP headers, following RFC 2616 to the letter. If you can take advantage of non-HTTP knowledge to make a better guess at the encoding, you should set ``r.encoding`` appropriately before accessing this property. R0terrorstreplaceN(R>RRR%Rt LookupErrorR(R6RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyttext>s    cKs|j r}|jr}t|jdkr}t|j}|dk r}y tj|jj||SWqztk rvqzXq}ntj|j |S(sReturns the json-encoded content of a response, if any. :param \*\*kwargs: Optional arguments that ``json.loads`` takes. :raises ValueError: If the response body does not contain valid json. 
iN( RRRORR>RtloadsRLtUnicodeDecodeErrorR(R6tkwargsR((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyR+ds(   cCsj|jjd}i}|rft|}x9|D].}|jdpR|jd}|||(R6R((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRs   N(R_R`RzRRvRRRRRwRRR:RaRRRRRRnRtITER_CHUNK_SIZER>RRRR+RRR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pyRBs2 /     7&  (QRzRdRtsystencodings.idnat encodingstpip._vendor.urllib3.fieldsRtpip._vendor.urllib3.filepostRtpip._vendor.urllib3.utilRtpip._vendor.urllib3.exceptionsRRRRtioRRhRt structuresR RtR RuR R R t exceptionsRRRRRRRt_internal_utilsRRtutilsRRRRRRRRRR tcompatR!R"R#R$R%R&R'R(R)R*R+Rt status_codesR,tmovedtfoundtotherttemporary_redirectRRtDEFAULT_REDIRECT_LIMITRRRR-RcRpRxR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/requests/models.pytsB    "4FF  nF;PK.e[&&requests/auth.pynu[# -*- coding: utf-8 -*- """ requests.auth ~~~~~~~~~~~~~ This module contains the authentication handlers for Requests. """ import os import re import time import hashlib import threading import warnings from base64 import b64encode from .compat import urlparse, str, basestring from .cookies import extract_cookies_to_jar from ._internal_utils import to_native_string from .utils import parse_dict_header CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' CONTENT_TYPE_MULTI_PART = 'multipart/form-data' def _basic_auth_str(username, password): """Returns a Basic Auth string.""" # "I want us to put a big-ol' comment on top of it that # says that this behaviour is dumb but we need to preserve # it because people are relying on it." # - Lukasa # # These are here solely to maintain backwards compatibility # for things like ints. This will be removed in 3.0.0. if not isinstance(username, basestring): warnings.warn( "Non-string usernames will no longer be supported in Requests " "3.0.0. 
Please convert the object you've passed in ({0!r}) to " "a string or bytes object in the near future to avoid " "problems.".format(username), category=DeprecationWarning, ) username = str(username) if not isinstance(password, basestring): warnings.warn( "Non-string passwords will no longer be supported in Requests " "3.0.0. Please convert the object you've passed in ({0!r}) to " "a string or bytes object in the near future to avoid " "problems.".format(password), category=DeprecationWarning, ) password = str(password) # -- End Removal -- if isinstance(username, str): username = username.encode('latin1') if isinstance(password, str): password = password.encode('latin1') authstr = 'Basic ' + to_native_string( b64encode(b':'.join((username, password))).strip() ) return authstr class AuthBase(object): """Base class that all auth implementations derive from""" def __call__(self, r): raise NotImplementedError('Auth hooks must be callable.') class HTTPBasicAuth(AuthBase): """Attaches HTTP Basic Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password def __eq__(self, other): return all([ self.username == getattr(other, 'username', None), self.password == getattr(other, 'password', None) ]) def __ne__(self, other): return not self == other def __call__(self, r): r.headers['Authorization'] = _basic_auth_str(self.username, self.password) return r class HTTPProxyAuth(HTTPBasicAuth): """Attaches HTTP Proxy Authentication to a given Request object.""" def __call__(self, r): r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password) return r class HTTPDigestAuth(AuthBase): """Attaches HTTP Digest Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password # Keep state in per-thread local storage self._thread_local = threading.local() def init_per_thread_state(self): # Ensure state is initialized just 
once per-thread if not hasattr(self._thread_local, 'init'): self._thread_local.init = True self._thread_local.last_nonce = '' self._thread_local.nonce_count = 0 self._thread_local.chal = {} self._thread_local.pos = None self._thread_local.num_401_calls = None def build_digest_header(self, method, url): """ :rtype: str """ realm = self._thread_local.chal['realm'] nonce = self._thread_local.chal['nonce'] qop = self._thread_local.chal.get('qop') algorithm = self._thread_local.chal.get('algorithm') opaque = self._thread_local.chal.get('opaque') hash_utf8 = None if algorithm is None: _algorithm = 'MD5' else: _algorithm = algorithm.upper() # lambdas assume digest modules are imported at the top level if _algorithm == 'MD5' or _algorithm == 'MD5-SESS': def md5_utf8(x): if isinstance(x, str): x = x.encode('utf-8') return hashlib.md5(x).hexdigest() hash_utf8 = md5_utf8 elif _algorithm == 'SHA': def sha_utf8(x): if isinstance(x, str): x = x.encode('utf-8') return hashlib.sha1(x).hexdigest() hash_utf8 = sha_utf8 KD = lambda s, d: hash_utf8("%s:%s" % (s, d)) if hash_utf8 is None: return None # XXX not implemented yet entdig = None p_parsed = urlparse(url) #: path is request-uri defined in RFC 2616 which should not be empty path = p_parsed.path or "/" if p_parsed.query: path += '?' 
+ p_parsed.query A1 = '%s:%s:%s' % (self.username, realm, self.password) A2 = '%s:%s' % (method, path) HA1 = hash_utf8(A1) HA2 = hash_utf8(A2) if nonce == self._thread_local.last_nonce: self._thread_local.nonce_count += 1 else: self._thread_local.nonce_count = 1 ncvalue = '%08x' % self._thread_local.nonce_count s = str(self._thread_local.nonce_count).encode('utf-8') s += nonce.encode('utf-8') s += time.ctime().encode('utf-8') s += os.urandom(8) cnonce = (hashlib.sha1(s).hexdigest()[:16]) if _algorithm == 'MD5-SESS': HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce)) if not qop: respdig = KD(HA1, "%s:%s" % (nonce, HA2)) elif qop == 'auth' or 'auth' in qop.split(','): noncebit = "%s:%s:%s:%s:%s" % ( nonce, ncvalue, cnonce, 'auth', HA2 ) respdig = KD(HA1, noncebit) else: # XXX handle auth-int. return None self._thread_local.last_nonce = nonce # XXX should the partial digests be encoded too? base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ 'response="%s"' % (self.username, realm, nonce, path, respdig) if opaque: base += ', opaque="%s"' % opaque if algorithm: base += ', algorithm="%s"' % algorithm if entdig: base += ', digest="%s"' % entdig if qop: base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) return 'Digest %s' % (base) def handle_redirect(self, r, **kwargs): """Reset num_401_calls counter on redirects.""" if r.is_redirect: self._thread_local.num_401_calls = 1 def handle_401(self, r, **kwargs): """ Takes the given response and tries digest-auth, if needed. :rtype: requests.Response """ # If response is not 4xx, do not auth # See https://github.com/requests/requests/issues/3772 if not 400 <= r.status_code < 500: self._thread_local.num_401_calls = 1 return r if self._thread_local.pos is not None: # Rewind the file position indicator of the body to where # it was to resend the request. 
r.request.body.seek(self._thread_local.pos) s_auth = r.headers.get('www-authenticate', '') if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2: self._thread_local.num_401_calls += 1 pat = re.compile(r'digest ', flags=re.IGNORECASE) self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1)) # Consume content and release the original connection # to allow our new request to reuse the same one. r.content r.close() prep = r.request.copy() extract_cookies_to_jar(prep._cookies, r.request, r.raw) prep.prepare_cookies(prep._cookies) prep.headers['Authorization'] = self.build_digest_header( prep.method, prep.url) _r = r.connection.send(prep, **kwargs) _r.history.append(r) _r.request = prep return _r self._thread_local.num_401_calls = 1 return r def __call__(self, r): # Initialize per-thread state, if needed self.init_per_thread_state() # If we have a saved nonce, skip the 401 if self._thread_local.last_nonce: r.headers['Authorization'] = self.build_digest_header(r.method, r.url) try: self._thread_local.pos = r.body.tell() except AttributeError: # In the case of HTTPDigestAuth being reused and the body of # the previous request was a file-like object, pos has the # file position of the previous body. Ensure it's set to # None. self._thread_local.pos = None r.register_hook('response', self.handle_401) r.register_hook('response', self.handle_redirect) self._thread_local.num_401_calls = 1 return r def __eq__(self, other): return all([ self.username == getattr(other, 'username', None), self.password == getattr(other, 'password', None) ]) def __ne__(self, other): return not self == other PK.e[\++requests/structures.pyonu[ abc@sUdZddlZddlmZdejfdYZdefdYZdS( sO requests.structures ~~~~~~~~~~~~~~~~~~~ Data structures that power Requests. iNi(t OrderedDicttCaseInsensitiveDictcBskeZdZd dZdZdZdZdZdZ dZ dZ d Z d Z RS( sA case-insensitive ``dict``-like object. 
Implements all methods and operations of ``collections.MutableMapping`` as well as dict's ``copy``. Also provides ``lower_items``. All keys are expected to be strings. The structure remembers the case of the last key to be set, and ``iter(instance)``, ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` will contain case-sensitive keys. However, querying and contains testing is case insensitive:: cid = CaseInsensitiveDict() cid['Accept'] = 'application/json' cid['aCCEPT'] == 'application/json' # True list(cid) == ['Accept'] # True For example, ``headers['content-encoding']`` will return the value of a ``'Content-Encoding'`` response header, regardless of how the header name was originally stored. If the constructor, ``.update``, or equality comparison operations are given keys that have equal ``.lower()``s, the behavior is undefined. cKs5t|_|dkr!i}n|j||dS(N(Rt_storetNonetupdate(tselftdatatkwargs((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyt__init__*s   cCs||f|j|j<s(Rtvalues(R((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyt__iter__;scCs t|jS(N(tlenR(R((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyt__len__>scCsd|jjDS(s.Like iteritems(), but with all lowercase keys.css%|]\}}||dfVqdS(iN((Rtlowerkeytkeyval((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pys Ds(Rtitems(R((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyt lower_itemsAscCsGt|tjr!t|}ntSt|jt|jkS(N(t isinstancet collectionstMappingRtNotImplementedtdictR(Rtother((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyt__eq__IscCst|jjS(N(RRR(R((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pytcopyRscCstt|jS(N(tstrRR(R((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyt__repr__UsN(t__name__t __module__t__doc__RRR R RRRRR R!R#(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyRs        t 
LookupDictcBs8eZdZddZdZdZddZRS(sDictionary lookup object.cCs ||_tt|jdS(N(tnametsuperR'R(RR(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyR\s cCs d|jS(Ns (R((R((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyR#`scCs|jj|dS(N(t__dict__tgetR(RR ((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyR cscCs|jj||S(N(R*R+(RR tdefault((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyR+hsN(R$R%R&RRR#R R+(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyR'Ys    (R&RtcompatRtMutableMappingRRR'(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyts JPK.e[  requests/structures.pynu[# -*- coding: utf-8 -*- """ requests.structures ~~~~~~~~~~~~~~~~~~~ Data structures that power Requests. """ import collections from .compat import OrderedDict class CaseInsensitiveDict(collections.MutableMapping): """A case-insensitive ``dict``-like object. Implements all methods and operations of ``collections.MutableMapping`` as well as dict's ``copy``. Also provides ``lower_items``. All keys are expected to be strings. The structure remembers the case of the last key to be set, and ``iter(instance)``, ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` will contain case-sensitive keys. However, querying and contains testing is case insensitive:: cid = CaseInsensitiveDict() cid['Accept'] = 'application/json' cid['aCCEPT'] == 'application/json' # True list(cid) == ['Accept'] # True For example, ``headers['content-encoding']`` will return the value of a ``'Content-Encoding'`` response header, regardless of how the header name was originally stored. If the constructor, ``.update``, or equality comparison operations are given keys that have equal ``.lower()``s, the behavior is undefined. 
""" def __init__(self, data=None, **kwargs): self._store = OrderedDict() if data is None: data = {} self.update(data, **kwargs) def __setitem__(self, key, value): # Use the lowercased key for lookups, but store the actual # key alongside the value. self._store[key.lower()] = (key, value) def __getitem__(self, key): return self._store[key.lower()][1] def __delitem__(self, key): del self._store[key.lower()] def __iter__(self): return (casedkey for casedkey, mappedvalue in self._store.values()) def __len__(self): return len(self._store) def lower_items(self): """Like iteritems(), but with all lowercase keys.""" return ( (lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items() ) def __eq__(self, other): if isinstance(other, collections.Mapping): other = CaseInsensitiveDict(other) else: return NotImplemented # Compare insensitively return dict(self.lower_items()) == dict(other.lower_items()) # Copy is required def copy(self): return CaseInsensitiveDict(self._store.values()) def __repr__(self): return str(dict(self.items())) class LookupDict(dict): """Dictionary lookup object.""" def __init__(self, name=None): self.name = name super(LookupDict, self).__init__() def __repr__(self): return '' % (self.name) def __getitem__(self, key): # We allow fall-through here, so values default to None return self.__dict__.get(key, None) def get(self, key, default=None): return self.__dict__.get(key, default) PK.e[`SSrequests/help.pynu["""Module containing bug report helper(s).""" from __future__ import print_function import json import platform import sys import ssl from pip._vendor import idna from pip._vendor import urllib3 from pip._vendor import chardet from . import __version__ as requests_version try: from .packages.urllib3.contrib import pyopenssl except ImportError: pyopenssl = None OpenSSL = None cryptography = None else: import OpenSSL import cryptography def _implementation(): """Return a dict with the Python implementation and version. 
Provide both the name and the version of the Python implementation currently running. For example, on CPython 2.7.5 it will return {'name': 'CPython', 'version': '2.7.5'}. This function works best on CPython and PyPy: in particular, it probably doesn't work for Jython or IronPython. Future investigation should be done to work out the correct shape of the code for those platforms. """ implementation = platform.python_implementation() if implementation == 'CPython': implementation_version = platform.python_version() elif implementation == 'PyPy': implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major, sys.pypy_version_info.minor, sys.pypy_version_info.micro) if sys.pypy_version_info.releaselevel != 'final': implementation_version = ''.join([ implementation_version, sys.pypy_version_info.releaselevel ]) elif implementation == 'Jython': implementation_version = platform.python_version() # Complete Guess elif implementation == 'IronPython': implementation_version = platform.python_version() # Complete Guess else: implementation_version = 'Unknown' return {'name': implementation, 'version': implementation_version} def info(): """Generate information for a bug report.""" try: platform_info = { 'system': platform.system(), 'release': platform.release(), } except IOError: platform_info = { 'system': 'Unknown', 'release': 'Unknown', } implementation_info = _implementation() urllib3_info = {'version': urllib3.__version__} chardet_info = {'version': chardet.__version__} pyopenssl_info = { 'version': None, 'openssl_version': '', } if OpenSSL: pyopenssl_info = { 'version': OpenSSL.__version__, 'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER, } cryptography_info = { 'version': getattr(cryptography, '__version__', ''), } idna_info = { 'version': getattr(idna, '__version__', ''), } # OPENSSL_VERSION_NUMBER doesn't exist in the Python 2.6 ssl module. 
system_ssl = getattr(ssl, 'OPENSSL_VERSION_NUMBER', None) system_ssl_info = { 'version': '%x' % system_ssl if system_ssl is not None else '' } return { 'platform': platform_info, 'implementation': implementation_info, 'system_ssl': system_ssl_info, 'using_pyopenssl': pyopenssl is not None, 'pyOpenSSL': pyopenssl_info, 'urllib3': urllib3_info, 'chardet': chardet_info, 'cryptography': cryptography_info, 'idna': idna_info, 'requests': { 'version': requests_version, }, } def main(): """Pretty-print the bug information as JSON.""" print(json.dumps(info(), sort_keys=True, indent=2)) if __name__ == '__main__': main() PK.e[:<#J#Jrequests/adapters.pycnu[ abc@s5dZddlZddlZddlmZmZddlmZddl m Z ddl m Z ddlmZddlmZdd lmZdd lmZdd lmZdd lmZdd lmZddlmZddlmZddlmZddlmZddlmZm Z ddl!m"Z"m#Z#m$Z$m%Z%m&Z&m'Z'ddl(m)Z)ddl*m+Z+ddl,m-Z-m.Z.m/Z/mZmZm0Z0m1Z1ddl2m3Z3yddl4m5Z5Wne6k rdZ5nXe7Z8dZ9dZ:dZ<de=fdYZ>de>fd YZ?dS(!s requests.adapters ~~~~~~~~~~~~~~~~~ This module contains the transport adapters that Requests uses to define and maintain connections. iN(t PoolManagertproxy_from_url(t HTTPResponse(tTimeout(tRetry(tClosedPoolError(tConnectTimeoutError(t HTTPError(t MaxRetryError(tNewConnectionError(t ProxyError(t ProtocolError(tReadTimeoutError(tSSLError(t ResponseErrori(tResponse(turlparset basestring(tDEFAULT_CA_BUNDLE_PATHtget_encoding_from_headerstprepend_scheme_if_neededtget_auth_from_urlt urldefragautht select_proxy(tCaseInsensitiveDict(textract_cookies_to_jar(tConnectionErrortConnectTimeoutt ReadTimeoutR R t RetryErrort InvalidSchema(t_basic_auth_str(tSOCKSProxyManagercOstddS(Ns'Missing dependencies for SOCKS support.(R(targstkwargs((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR +si it BaseAdaptercBs8eZdZdZededddZdZRS(sThe Base Transport AdaptercCstt|jdS(N(tsuperR#t__init__(tself((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR%7scCs tdS(sCSends PreparedRequest object. Returns Response object. 
:param request: The :class:`PreparedRequest ` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. N(tNotImplementedError(R&trequesttstreamttimeouttverifytcerttproxies((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pytsend:scCs tdS(s!Cleans up adapter specific items.N(R'(R&((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pytcloseLsN( t__name__t __module__t__doc__R%tFalsetNonetTrueR.R/(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR#4s   t HTTPAdaptercBseZdZdddddgZeeeedZdZdZ ed Z d Z d Z d Z dd ZdZdZdZdZededddZRS(sThe built-in HTTP Adapter for urllib3. Provides a general-case interface for Requests sessions to contact HTTP and HTTPS urls by implementing the Transport Adapter interface. This class will usually be created by the :class:`Session ` class under the covers. :param pool_connections: The number of urllib3 connection pools to cache. :param pool_maxsize: The maximum number of connections to save in the pool. :param max_retries: The maximum number of retries each connection should attempt. Note, this applies only to failed DNS lookups, socket connections and connection timeouts, never to requests where data has made it to the server. By default, Requests does not retry failed connections. If you need granular control over the conditions under which we retry a request, import urllib3's ``Retry`` class and pass that instead. 
:param pool_block: Whether the connection pool should block for connections. Usage:: >>> import requests >>> s = requests.Session() >>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> s.mount('http://', a) t max_retriestconfigt_pool_connectionst _pool_maxsizet _pool_blockcCs|tkr$tddt|_ntj||_i|_i|_tt|j ||_ ||_ ||_ |j ||d|dS(Nitreadtblock(tDEFAULT_RETRIESRR3R7tfrom_intR8t proxy_managerR$R6R%R9R:R;tinit_poolmanager(R&tpool_connectionst pool_maxsizeR7t pool_block((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR%ns      cstfdjDS(Nc3s'|]}|t|dfVqdS(N(tgetattrR4(t.0tattr(R&(sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pys s(tdictt __attrs__(R&((R&sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyt __getstate__scCsbi|_i|_x*|jD]\}}t|||qW|j|j|jd|jdS(NR=(R@R8titemstsetattrRAR9R:R;(R&tstateRGtvalue((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyt __setstate__s   c KsF||_||_||_td|d|d|dt||_dS(sInitializes a urllib3 PoolManager. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param connections: The number of urllib3 connection pools to cache. :param maxsize: The maximum number of connections to save in the pool. :param block: Block when no free connections are available. :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. t num_poolstmaxsizeR=tstrictN(R9R:R;RR5t poolmanager(R&t connectionsRQR=t pool_kwargs((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyRAs   c Ks||jkr|j|}n|jjdrt|\}}t|d|d|d|jd|jd|j|}|j|`. :param proxy: The proxy to return a urllib3 ProxyManager for. :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. 
:returns: ProxyManager :rtype: urllib3.ProxyManager tsockstusernametpasswordRPRQR=t proxy_headers( R@tlowert startswithRR R9R:R;RYR(R&tproxyt proxy_kwargstmanagerRWRXRY((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pytproxy_manager_fors*     cCs|jjdr|rd }|tk r6|}n|sEt}n| s_tjj| rwtdj |nd|_ tjj |s||_ q||_ nd|_ d |_ d |_ |rt|ts|d|_|d|_n||_d |_|jrCtjj|j rCtdj |jn|jrtjj|j rtdj |jqnd S( sAVerify a SSL certificate. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param conn: The urllib3 connection object associated with the cert. :param url: The requested URL. :param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: The SSL certificate to verify. thttpssFCould not find a suitable TLS CA certificate bundle, invalid path: {0}t CERT_REQUIREDt CERT_NONEiis:Could not find the TLS certificate file, invalid path: {0}s2Could not find the TLS key file, invalid path: {0}N(RZR[R4R5RtostpathtexiststIOErrortformatt cert_reqstisdirtca_certst ca_cert_dirt isinstanceRt cert_filetkey_file(R&tconnturlR+R,tcert_loc((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyt cert_verifys8                cCst}t|dd|_tt|di|_t|j|_||_|jj |_ t |j t r|j j d|_ n |j |_ t|j||||_||_|S(sBuilds a :class:`Response ` object from a urllib3 response. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter ` :param req: The :class:`PreparedRequest ` used to generate the response. :param resp: The urllib3 response object. 
:rtype: requests.Response tstatustheaderssutf-8N(RRER4t status_codeRRtRtencodingtrawtreasonRlRptbytestdecodeRtcookiesR(t connection(R&treqtresptresponse((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pytbuild_responses     cCsst||}|rEt|d}|j|}|j|}n*t|}|j}|jj|}|S(sReturns a urllib3 connection for the given URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param url: The URL to connect to. :param proxies: (optional) A Requests-style dictionary of proxies used on this request. :rtype: urllib3.ConnectionPool thttp(RRR_tconnection_from_urlRtgeturlRS(R&RpR-R\R@Rotparsed((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pytget_connection"s   cCs5|jjx!|jjD]}|jqWdS(sDisposes of any internal state. Currently, this closes the PoolManager and any active ProxyManager, which closes any pooled connections. N(RStclearR@tvalues(R&R\((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR/9s c Cst|j|}t|jj}|o3|dk}t}|rit|jj}|jd}n|j}|r| rt|j}n|S(s?Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param request: The :class:`PreparedRequest ` being sent. :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. :rtype: str R`RV( RRpRtschemeR3RZR[tpath_urlR( R&R(R-R\Rtis_proxied_http_requesttusing_socks_proxyt proxy_schemeRp((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyt request_urlCs  cKsdS(s"Add any headers needed by the connection. As of v2.0 this does nothing by default, but is left for overriding by users that subclass the :class:`HTTPAdapter `. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. 
:param request: The :class:`PreparedRequest ` to add headers to. :param kwargs: The keyword arguments from the call to send(). N((R&R(R"((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyt add_headers`s cCs8i}t|\}}|r4t|||d`. :param proxies: The url of the proxy being used for this request. :rtype: dict sProxy-Authorization(RR(R&R\RtRWRX((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyRYns cCs}|j|j|}|j||j|||j||}|j||jdkphd|jk } t|t ry%|\} } t d| d| }Wqt k r} dj |} t | qXn't|t rnt d|d|}y| s[|j d|jd|d|jd|jd td td td td |jd| }nft|drv|j}n|jdt}y"|j|j|dtx-|jjD]\}}|j||qW|jx^|jD]S}|jtt|djd|jd|j||jdqW|jdy|jdt}Wntk r|j}nXt j!|d|d|d td t}Wn|j"nXWnt#t$j%fk r} t&| d|n{t'k r} t| j(t)r=t| j(t*s=t+| d|q=nt| j(t,rdt-| d|nt| j(t.rt/| d|nt| j(t0rt1| d|nt&| d|nt2k r} t&| d|nt.k r } t/| ndt0t3fk rl} t| t0rBt1| d|qmt| t4rft5| d|qmnX|j6||S(sSends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. :rtype: requests.Response sContent-LengthtconnectR<ssInvalid timeout {0}. 
Pass a (connect, read) timeout tuple, or a single float to set both timeouts to the same valuetmethodRptbodyRttredirecttassert_same_hosttpreload_contenttdecode_contenttretriesR*t proxy_pooltskip_accept_encodingisutf-8s s0 t bufferingtpoolR|R(N(7RRpRrRRRR4RtRlttuplet TimeoutSaucet ValueErrorRgturlopenRR3R7thasattrRt _get_conntDEFAULT_POOL_TIMEOUTt putrequestR5RKt putheadert endheadersR.thextlentencodet getresponset TypeErrorRt from_httplibR/R tsocketterrorRRRxRR RRRt _ProxyErrorR t _SSLErrorR Rt _HTTPErrorR RR(R&R(R)R*R+R,R-RoRptchunkedRR<teterrR~tlow_conntheaderRNtitr((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR.s            &       N(R0R1R2RItDEFAULT_POOLSIZER>tDEFAULT_POOLBLOCKR%RJRORAR_RrRR4RR/RRRYR3R5R.(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyR6Qs$      % 4 %    (@R2tos.pathRcRtpip._vendor.urllib3.poolmanagerRRtpip._vendor.urllib3.responseRtpip._vendor.urllib3.utilRRtpip._vendor.urllib3.util.retryRtpip._vendor.urllib3.exceptionsRRRRRR R RR R R RRtmodelsRtcompatRRtutilsRRRRRRt structuresRR{Rt exceptionsRRRRRtauthRt!pip._vendor.urllib3.contrib.socksR t ImportErrorR3RRR>R4RtobjectR#R6(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyt sB  .4  PK.e[6dWdWrequests/sessions.pyonu[ abc@s+dZddlZddlZddlZddlmZddlmZddlm Z ddl m Z m Z m Z mZmZddlmZmZmZmZdd lmZmZmZdd lmZmZdd lmZdd lmZm Z dd l!m"Z"m#Z#m$Z$m%Z%ddl&m'Z'ddl(m)Z)ddlm*Z*m+Z+m,Z,m-Z-m.Z.m/Z/m0Z0ddl1m2Z2ddlm3Z3ej4dkry ej5Z6Wne7k rej8Z6nXn ejZ6e dZ9e dZ:de;fdYZ<de<fdYZ=dZ>dS(s requests.session ~~~~~~~~~~~~~~~~ This module provides a Session object to manage and persist settings across requests (cookies, auth, proxies). 
iN(tMapping(t timedeltai(t_basic_auth_str(t cookielibtis_py3t OrderedDictturljointurlparse(tcookiejar_from_dicttextract_cookies_to_jartRequestsCookieJart merge_cookies(tRequesttPreparedRequesttDEFAULT_REDIRECT_LIMIT(t default_hookst dispatch_hook(tto_native_string(tto_key_val_listtdefault_headers(tTooManyRedirectst InvalidSchematChunkedEncodingErrortContentDecodingError(tCaseInsensitiveDict(t HTTPAdapter(t requote_uritget_environ_proxiestget_netrc_authtshould_bypass_proxiestget_auth_from_urlt rewind_bodyt DEFAULT_PORTS(tcodes(tREDIRECT_STATItWindowscCs|dkr|S|dkr |St|to;t|tsB|S|t|}|jt|g|jD]\}}|dkrt|^qt}x|D] }||=qW|S(sDetermines appropriate setting for a given request, taking into account the explicit setting on that request, and the setting in the session. If a setting is a dictionary, they will be merged together using `dict_class` N(tNonet isinstanceRRtupdatetitems(trequest_settingtsession_settingt dict_classtmerged_settingtktvt none_keystkey((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt merge_setting2s  1  cCsZ|dks!|jdgkr%|S|dksF|jdgkrJ|St|||S(sProperly merges both requests and session hooks. This is necessary because when request_hooks == {'response': []}, the merge breaks Session hooks entirely. tresponseN(R$tgetR0(t request_hookst session_hooksR*((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt merge_hooksQs !!tSessionRedirectMixincBsPeZdZdZededdedZdZdZ dZ RS(cCs?|jr;|jd}tr.|jd}nt|dSdS(s7Receives a Response. 
Returns a redirect URI or ``None``tlocationtlatin1tutf8N(t is_redirecttheadersRtencodeRR$(tselftrespR7((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytget_redirect_targetbs    cCst|}t|}|j|jkr.tS|jdkrn|jdkrn|jdkrn|jdkrntS|j|jk}|j|jk}tj|jddf}| r|j|kr|j|krtS|p|S(sFDecide whether Authorization header should be removed when redirectingthttpiPthttpsiN(iPN(iN( RthostnametTruetschemetportR$tFalseR R2(R=told_urltnew_urlt old_parsedt new_parsedt changed_porttchanged_schemet default_port((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytshould_strip_authxs  c ksg} |j|} x| r|j} | j|| d|_y |jWn-tttfk r~|jj dt nXt |j|j krt d|j d|n|j| jdrt|j} dt| j| f} nt| }|j} |js3t|jt| } n t| } t| | _|j| ||jtjtjfkrd}x!|D]}| jj|dqWd| _ n| j}y |d =Wnt!k rnXt"| j#||jt$| j#|j%| j&| j#|j'| |}|j(| || j)dk oVd|kpVd |k}|rlt*| n| }|r|Vq|j+|d |d |d |d|d|dt | }t"|j%| |j|j|} |VqWdS(sBReceives a Response. Returns a generator of Responses or Requests.itdecode_contentsExceeded %s redirects.R1s//s%s:%ssContent-Lengths Content-TypesTransfer-EncodingtCookietstreamttimeouttverifytcerttproxiestallow_redirectsN(sContent-Lengths Content-TypesTransfer-Encoding(,R?tcopytappendthistorytcontentRRt RuntimeErrortrawtreadRFtlent max_redirectsRtcloset startswithRturlRRDtgeturltnetlocRRtrebuild_methodt status_codeR!ttemporary_redirecttpermanent_redirectR;tpopR$tbodytKeyErrorR t_cookiesR tcookiestprepare_cookiestrebuild_proxiest rebuild_autht_body_positionRtsend(R=R>treqRQRRRSRTRUtyield_requeststadapter_kwargsthistRbtprepared_requestt parsed_rurltparsedtpurged_headerstheaderR;t rewindable((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytresolve_redirectssr                 cCs{|j}|j}d|kr@|j|jj|r@|d=n|jrUt|nd}|dk rw|j|ndS(sWhen being redirected we may want to strip authentication from the request to avoid leaking credentials. This method intelligently removes and reapplies authentication where possible to avoid credential loss. 
t AuthorizationN(R;RbRNtrequestt trust_envRR$t prepare_auth(R=RwR1R;Rbtnew_auth((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyRps  $  c Cs5|dk r|ni}|j}|j}t|j}|j}|jd}t|d|}|jr| rt |d|} | j|| jd} | r|j || qnd|kr|d=nyt ||\} } Wnt k rd\} } nX| r1| r1t | | |d>> import requests >>> s = requests.Session() >>> s.get('http://httpbin.org/get') Or as a context manager:: >>> with requests.Session() as s: >>> s.get('http://httpbin.org/get') R;RmtauthRUthookstparamsRSRTtprefetchtadaptersRQRR_cCst|_d|_i|_t|_i|_t|_ t |_ d|_ t |_t |_ti|_t|_|jdt|jdtdS(Nshttps://shttp://(RR;R$RRURRRRFRQRCRSRTRR_RRRmRRtmountR(R=((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt__init__js           cCs|S(N((R=((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt __enter__scGs|jdS(N(R`(R=targs((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt__exit__scCs*|jp i}t|tjs0t|}nttt|j|}|j}|jr| r|j rt |j }nt }|j d|j jd|j d|jd|jd|jdt|j|jdtdt|j|jd t||jd |d t|j|j |S( sConstructs a :class:`PreparedRequest ` for transmission and returns it. The :class:`PreparedRequest` has settings merged from the :class:`Request ` instance and those of the :class:`Session`. :param request: :class:`Request` instance to prepare with this session's settings. :rtype: requests.PreparedRequest RRbtfilestdatatjsonR;R*RRRmR(RmR%Rt CookieJarRR R RRRRbR tprepareRtupperRRRR0R;RRR5R(R=RRmtmerged_cookiesRtp((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytprepare_requests*        cCstd|jd|d|d|d|p-id|d|p?id|d |d | }|j|}| poi} |j|j| | ||}i| d 6| d 6}|j||j||}|S( sConstructs a :class:`Request `, prepares it and sends it. Returns :class:`Response ` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. 
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'filename': file-like-objects`` for multipart encoding upload. :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Set to True by default. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol or protocol and hostname to the URL of the proxy. :param stream: (optional) whether to immediately download the response content. Defaults to ``False``. :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. :rtype: requests.Response RRbR;RRRRRRmRRRRV(R RRtmerge_environment_settingsRbR&Rr(R=RRbRRR;RmRRRRRVRURRQRSRTRRstpreptsettingst send_kwargsR>((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyRs*)       cKs#|jdt|jd||S(sSends a GET request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response RVR(RRCR(R=Rbtkwargs((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyR2scKs#|jdt|jd||S(sSends a OPTIONS request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. 
:param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response RVtOPTIONS(RRCR(R=RbR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytoptions!scKs#|jdt|jd||S(sSends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response RVR(RRFR(R=RbR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pythead,scKs|jd|d|d||S(sSends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response RRR(R(R=RbRRR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytpost7s cKs|jd|d||S(sYSends a PUT request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response tPUTR(R(R=RbRR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytputCs cKs|jd|d||S(s[Sends a PATCH request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response tPATCHR(R(R=RbRR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytpatchNs cKs|jd||S(sSends a DELETE request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. 
:rtype: requests.Response tDELETE(R(R=RbR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytdeleteYsc Ks|jd|j|jd|j|jd|j|jd|jt|trjtdn|jdt }|j d}|j }|j d|j }t}|j||}t|} td| |_td |||}|jr1x-|jD]} t|j| j| jq Wnt|j||j|j|||} |r{g| D]} | ^qing} | r| jd || j}| |_n|sy(t|j||d t ||_Wqtk rqXn|s|jn|S( sISend a given PreparedRequest. :rtype: requests.Response RQRSRTRUs#You can only send PreparedRequests.RVRbtsecondsR1iRt(RRQRSRTRUR%R t ValueErrorRiRCR2Rt get_adapterRbtpreferred_clockRrRtelapsedRRYR RmRR\R}tinserttnextt_nextt StopIterationRZ( R=RRRVRQRtadaptertstarttrRR>tgenRY((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyRrcsB     %  (  c Cs|jr|dk r$|jdnd}t|d|}x*|jD]\}} |j|| qIW|tks|dkrtjjdptjjd}qnt ||j }t ||j }t ||j }t ||j }i|d6|d6|d6|d6S( s^ Check the environment and merge it with some settings. :rtype: dict RtREQUESTS_CA_BUNDLEtCURL_CA_BUNDLERSRURQRTN(RR$R2RR'RRCtostenvironR0RURQRSRT( R=RbRURQRSRTRt env_proxiesR,R-((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyRs !cCsMx6|jjD]%\}}|jj|r|SqWtd|dS(s~ Returns the appropriate connection adapter for the given URL. :rtype: requests.adapters.BaseAdapter s*No connection adapters were found for '%s'N(RR'tlowerRaR(R=RbtprefixR((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyRscCs(x!|jjD]}|jqWdS(s+Closes all adapters and as such the sessionN(RtvaluesR`(R=R-((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyR`scCso||j|s(tdictt __attrs__(R=tstate((R=sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt __getstate__scCs1x*|jD]\}}t|||q WdS(N(R'tsetattr(R=RRtvalue((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt __setstate__sN(RRt__doc__RRRRRR$RCRR2RRRRRRRrRRR`RRR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyRQs2  7   ) D  I    cCstS(sQ Returns a :class:`Session` for context-management. 
:rtype: Session (R(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pytsessions(?RRtplatformttimet collectionsRtdatetimeRRRtcompatRRRRRRmRR R R tmodelsR R RRRRt_internal_utilsRtutilsRRt exceptionsRRRRt structuresRRRRRRRRRR t status_codesR!R"tsystemt perf_counterRtAttributeErrortclockR0R5tobjectR6RR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyt s<   (""4     PK.e[jFKKrequests/__version__.pyonu[ abc@s@dZdZdZdZdZdZdZdZdZd Z d S( trequestssPython HTTP for Humans.shttp://python-requests.orgs2.18.4is Kenneth Reitzsme@kennethreitz.orgs Apache 2.0sCopyright 2017 Kenneth Reitzu ✨ 🍰 ✨N( t __title__t__description__t__url__t __version__t __build__t __author__t__author_email__t __license__t __copyright__t__cake__(((sD/usr/lib/python2.7/site-packages/pip/_vendor/requests/__version__.pytsPK.e[+ + requests/exceptions.pynu[# -*- coding: utf-8 -*- """ requests.exceptions ~~~~~~~~~~~~~~~~~~~ This module contains the set of Requests' exceptions. """ from pip._vendor.urllib3.exceptions import HTTPError as BaseHTTPError class RequestException(IOError): """There was an ambiguous exception that occurred while handling your request. """ def __init__(self, *args, **kwargs): """Initialize RequestException with `request` and `response` objects.""" response = kwargs.pop('response', None) self.response = response self.request = kwargs.pop('request', None) if (response is not None and not self.request and hasattr(response, 'request')): self.request = self.response.request super(RequestException, self).__init__(*args, **kwargs) class HTTPError(RequestException): """An HTTP error occurred.""" class ConnectionError(RequestException): """A Connection error occurred.""" class ProxyError(ConnectionError): """A proxy error occurred.""" class SSLError(ConnectionError): """An SSL error occurred.""" class Timeout(RequestException): """The request timed out. 
Catching this error will catch both :exc:`~requests.exceptions.ConnectTimeout` and :exc:`~requests.exceptions.ReadTimeout` errors. """ class ConnectTimeout(ConnectionError, Timeout): """The request timed out while trying to connect to the remote server. Requests that produced this error are safe to retry. """ class ReadTimeout(Timeout): """The server did not send any data in the allotted amount of time.""" class URLRequired(RequestException): """A valid URL is required to make a request.""" class TooManyRedirects(RequestException): """Too many redirects.""" class MissingSchema(RequestException, ValueError): """The URL schema (e.g. http or https) is missing.""" class InvalidSchema(RequestException, ValueError): """See defaults.py for valid schemas.""" class InvalidURL(RequestException, ValueError): """The URL provided was somehow invalid.""" class InvalidHeader(RequestException, ValueError): """The header value provided was somehow invalid.""" class ChunkedEncodingError(RequestException): """The server declared chunked encoding but sent an invalid chunk.""" class ContentDecodingError(RequestException, BaseHTTPError): """Failed to decode response content""" class StreamConsumedError(RequestException, TypeError): """The content for this response was already consumed""" class RetryError(RequestException): """Custom retries logic failed""" class UnrewindableBodyError(RequestException): """Requests encountered an error when trying to rewind a body""" # Warnings class RequestsWarning(Warning): """Base warning for Requests.""" pass class FileModeWarning(RequestsWarning, DeprecationWarning): """A file was opened in text mode, but Requests determined its binary length.""" pass class RequestsDependencyWarning(RequestsWarning): """An imported dependency doesn't match the expected version range.""" pass PK.e[u_@ggrequests/__init__.pyonu[ abc@stdZddlmZddlmZddlZddlmZdZyeejejWn9e e fk rej dj ejejenXdd l mZejd edd lmZmZmZmZdd lmZmZmZmZdd lmZmZddlmZddlmZddlmZmZmZddl 
m!Z!m"Z"m#Z#m$Z$m%Z%m&Z&m'Z'm(Z(ddl)m*Z*m+Z+ddl,m-Z-ddlm.Z.m/Z/m0Z0m1Z1m2Z2m3Z3m4Z4m5Z5m6Z6ddl7Z7yddl7m8Z8Wn*e9k r@de7j:fdYZ8nXe7j;e<j=e8ejde4de>dS(s Requests HTTP Library ~~~~~~~~~~~~~~~~~~~~~ Requests is an HTTP library, written in Python, for human beings. Basic GET usage: >>> import requests >>> r = requests.get('https://www.python.org') >>> r.status_code 200 >>> 'Python is a programming language' in r.content True ... or POST: >>> payload = dict(key1='value1', key2='value2') >>> r = requests.post('http://httpbin.org/post', data=payload) >>> print(r.text) { ... "form": { "key2": "value2", "key1": "value1" }, ... } The other HTTP methods are supported - see `requests.api`. Full documentation is at . :copyright: (c) 2017 by Kenneth Reitz. :license: Apache 2.0, see LICENSE for more details. i(turllib3(tchardetNi(tRequestsDependencyWarningcCs|jd}t|dkr1|jdn|\}}}t|t|t|}}}|jdd \}}}t|t|t|}}}dS(Nt.it0i(tsplittlentappendtint(turllib3_versiontchardet_versiontmajortminortpatch((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/__init__.pytcheck_compatibility1s&&sAurllib3 ({0}) or chardet ({1}) doesn't match a supported version!(tDependencyWarningtignore(t __title__t__description__t__url__t __version__(t __build__t __author__t__author_email__t __license__(t __copyright__t__cake__(tutils(tpackages(tRequesttResponsetPreparedRequest(trequesttgettheadtpostR tputtdeletetoptions(tsessiontSession(tcodes( tRequestExceptiontTimeoutt URLRequiredtTooManyRedirectst HTTPErrortConnectionErrortFileModeWarningtConnectTimeoutt ReadTimeout(t NullHandlerR3cBseZdZRS(cCsdS(N((tselftrecord((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/__init__.pytemitss(t__name__t __module__R6(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/__init__.pyR3rstdefaultR(?t__doc__t pip._vendorRRtwarningst exceptionsRRRtAssertionErrort ValueErrortwarntformattpip._vendor.urllib3.exceptionsRt simplefilterRRRRRRRRRtRRtmodelsRRRtapiR R!R"R#R R$R%R&tsessionsR'R(t 
status_codesR)R*R+R,R-R.R/R0R1R2tloggingR3t ImportErrortHandlert getLoggerR7t addHandlertTrue(((sA/usr/lib/python2.7/site-packages/pip/_vendor/requests/__init__.pyt)s<    "":@  PK.e[]< C C requests/help.pycnu[ abc@s dZddlmZddlZddlZddlZddlZddlmZddlm Z ddlm Z ddl m Z ydd lmZWn#ek rdZdZdZnXddlZddlZd Zd Zd Zed kr endS(s'Module containing bug report helper(s).i(tprint_functionN(tidna(turllib3(tchardeti(t __version__(t pyopensslcCstj}|dkr'tj}n|dkrdtjjtjjtjjf}tjjdkrdj |tjjg}qn<|dkrtj}n!|dkrtj}nd}i|d 6|d 6S( sReturn a dict with the Python implementation and version. Provide both the name and the version of the Python implementation currently running. For example, on CPython 2.7.5 it will return {'name': 'CPython', 'version': '2.7.5'}. This function works best on CPython and PyPy: in particular, it probably doesn't work for Jython or IronPython. Future investigation should be done to work out the correct shape of the code for those platforms. tCPythontPyPys%s.%s.%stfinalttJythont IronPythontUnknowntnametversion( tplatformtpython_implementationtpython_versiontsystpypy_version_infotmajortminortmicrot releaseleveltjoin(timplementationtimplementation_version((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/help.pyt_implementations       c Csqy$itjd6tjd6}Wn%tk rKidd6dd6}nXt}itjd6}itjd6}idd6dd6}t rit jd6dt j j d6}nit t ddd6}it tddd6}t td d}i|dk rd|ndd6}i |d 6|d 6|d 6tdk d 6|d6|d6|d6|d6|d6itd6d6S(s&Generate information for a bug report.tsystemtreleaseR RR topenssl_versions%xRtOPENSSL_VERSION_NUMBERRRt system_ssltusing_pyopensslt pyOpenSSLRRt cryptographyRtrequestsN(RRRtIOErrorRRRRtNonetOpenSSLtSSLRtgetattrR#RtsslRtrequests_version( t platform_infotimplementation_infot urllib3_infot chardet_infotpyopenssl_infotcryptography_infot idna_infoR tsystem_ssl_info((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/help.pytinfo;sJ       cCs&ttjtdtdddS(s)Pretty-print the bug information as JSON.t 
sort_keystindentiN(tprinttjsontdumpsR4tTrue(((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/help.pytmainrst__main__(t__doc__t __future__RR8RRR*t pip._vendorRRRR RR+tpackages.urllib3.contribRt ImportErrorR&R'R#RR4R;t__name__(((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/help.pyts,         ! 7  PK.e[e}Gtrequests/models.pynu[# -*- coding: utf-8 -*- """ requests.models ~~~~~~~~~~~~~~~ This module contains the primary objects that power Requests. """ import collections import datetime import sys # Import encoding now, to avoid implicit import later. # Implicit import within threads may cause LookupError when standard library is in a ZIP, # such as in Embedded Python. See https://github.com/requests/requests/issues/3578. import encodings.idna from pip._vendor.urllib3.fields import RequestField from pip._vendor.urllib3.filepost import encode_multipart_formdata from pip._vendor.urllib3.util import parse_url from pip._vendor.urllib3.exceptions import ( DecodeError, ReadTimeoutError, ProtocolError, LocationParseError) from io import UnsupportedOperation from .hooks import default_hooks from .structures import CaseInsensitiveDict from .auth import HTTPBasicAuth from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar from .exceptions import ( HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError, ContentDecodingError, ConnectionError, StreamConsumedError) from ._internal_utils import to_native_string, unicode_is_ascii from .utils import ( guess_filename, get_auth_from_url, requote_uri, stream_decode_response_unicode, to_key_val_list, parse_header_links, iter_slices, guess_json_utf, super_len, check_header_validity) from .compat import ( cookielib, urlunparse, urlsplit, urlencode, str, bytes, is_py2, chardet, builtin_str, basestring) from .compat import json as complexjson from .status_codes import codes #: The set of HTTP status codes that indicate an automatically #: processable redirect. 
REDIRECT_STATI = ( codes.moved, # 301 codes.found, # 302 codes.other, # 303 codes.temporary_redirect, # 307 codes.permanent_redirect, # 308 ) DEFAULT_REDIRECT_LIMIT = 30 CONTENT_CHUNK_SIZE = 10 * 1024 ITER_CHUNK_SIZE = 512 class RequestEncodingMixin(object): @property def path_url(self): """Build the path URL to use.""" url = [] p = urlsplit(self.url) path = p.path if not path: path = '/' url.append(path) query = p.query if query: url.append('?') url.append(query) return ''.join(url) @staticmethod def _encode_params(data): """Encode parameters in a piece of data. Will successfully encode parameters when passed as a dict or a list of 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary if parameters are supplied as a dict. """ if isinstance(data, (str, bytes)): return data elif hasattr(data, 'read'): return data elif hasattr(data, '__iter__'): result = [] for k, vs in to_key_val_list(data): if isinstance(vs, basestring) or not hasattr(vs, '__iter__'): vs = [vs] for v in vs: if v is not None: result.append( (k.encode('utf-8') if isinstance(k, str) else k, v.encode('utf-8') if isinstance(v, str) else v)) return urlencode(result, doseq=True) else: return data @staticmethod def _encode_files(files, data): """Build the body for a multipart/form-data request. Will successfully encode files when passed as a dict or a list of tuples. Order is retained if data is a list of tuples but arbitrary if parameters are supplied as a dict. The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) or 4-tuples (filename, fileobj, contentype, custom_headers). 
""" if (not files): raise ValueError("Files must be provided.") elif isinstance(data, basestring): raise ValueError("Data must not be a string.") new_fields = [] fields = to_key_val_list(data or {}) files = to_key_val_list(files or {}) for field, val in fields: if isinstance(val, basestring) or not hasattr(val, '__iter__'): val = [val] for v in val: if v is not None: # Don't call str() on bytestrings: in Py3 it all goes wrong. if not isinstance(v, bytes): v = str(v) new_fields.append( (field.decode('utf-8') if isinstance(field, bytes) else field, v.encode('utf-8') if isinstance(v, str) else v)) for (k, v) in files: # support for explicit filename ft = None fh = None if isinstance(v, (tuple, list)): if len(v) == 2: fn, fp = v elif len(v) == 3: fn, fp, ft = v else: fn, fp, ft, fh = v else: fn = guess_filename(v) or k fp = v if isinstance(fp, (str, bytes, bytearray)): fdata = fp else: fdata = fp.read() rf = RequestField(name=k, data=fdata, filename=fn, headers=fh) rf.make_multipart(content_type=ft) new_fields.append(rf) body, content_type = encode_multipart_formdata(new_fields) return body, content_type class RequestHooksMixin(object): def register_hook(self, event, hook): """Properly register a hook.""" if event not in self.hooks: raise ValueError('Unsupported event specified, with event name "%s"' % (event)) if isinstance(hook, collections.Callable): self.hooks[event].append(hook) elif hasattr(hook, '__iter__'): self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable)) def deregister_hook(self, event, hook): """Deregister a previously registered hook. Returns True if the hook existed, False if not. """ try: self.hooks[event].remove(hook) return True except ValueError: return False class Request(RequestHooksMixin): """A user-created :class:`Request ` object. Used to prepare a :class:`PreparedRequest `, which is sent to the server. :param method: HTTP method to use. :param url: URL to send. :param headers: dictionary of headers to send. 
:param files: dictionary of {filename: fileobject} files to multipart upload. :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place. :param json: json for the body to attach to the request (if files or data is not specified). :param params: dictionary of URL parameters to append to the URL. :param auth: Auth handler or (user, pass) tuple. :param cookies: dictionary or CookieJar of cookies to attach to this request. :param hooks: dictionary of callback hooks, for internal usage. Usage:: >>> import requests >>> req = requests.Request('GET', 'http://httpbin.org/get') >>> req.prepare() """ def __init__(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None): # Default empty dicts for dict params. data = [] if data is None else data files = [] if files is None else files headers = {} if headers is None else headers params = {} if params is None else params hooks = {} if hooks is None else hooks self.hooks = default_hooks() for (k, v) in list(hooks.items()): self.register_hook(event=k, hook=v) self.method = method self.url = url self.headers = headers self.files = files self.data = data self.json = json self.params = params self.auth = auth self.cookies = cookies def __repr__(self): return '' % (self.method) def prepare(self): """Constructs a :class:`PreparedRequest ` for transmission and returns it.""" p = PreparedRequest() p.prepare( method=self.method, url=self.url, headers=self.headers, files=self.files, data=self.data, json=self.json, params=self.params, auth=self.auth, cookies=self.cookies, hooks=self.hooks, ) return p class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): """The fully mutable :class:`PreparedRequest ` object, containing the exact bytes that will be sent to the server. Generated from either a :class:`Request ` object or manually. 
Usage:: >>> import requests >>> req = requests.Request('GET', 'http://httpbin.org/get') >>> r = req.prepare() >>> s = requests.Session() >>> s.send(r) """ def __init__(self): #: HTTP verb to send to the server. self.method = None #: HTTP URL to send the request to. self.url = None #: dictionary of HTTP headers. self.headers = None # The `CookieJar` used to create the Cookie header will be stored here # after prepare_cookies is called self._cookies = None #: request body to send to the server. self.body = None #: dictionary of callback hooks, for internal usage. self.hooks = default_hooks() #: integer denoting starting position of a readable file-like body. self._body_position = None def prepare(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None): """Prepares the entire request with the given parameters.""" self.prepare_method(method) self.prepare_url(url, params) self.prepare_headers(headers) self.prepare_cookies(cookies) self.prepare_body(data, files, json) self.prepare_auth(auth, url) # Note that prepare_auth must be last to enable authentication schemes # such as OAuth to work on a fully prepared request. # This MUST go after prepare_auth. 
Authenticators could add a hook self.prepare_hooks(hooks) def __repr__(self): return '' % (self.method) def copy(self): p = PreparedRequest() p.method = self.method p.url = self.url p.headers = self.headers.copy() if self.headers is not None else None p._cookies = _copy_cookie_jar(self._cookies) p.body = self.body p.hooks = self.hooks p._body_position = self._body_position return p def prepare_method(self, method): """Prepares the given HTTP method.""" self.method = method if self.method is not None: self.method = to_native_string(self.method.upper()) @staticmethod def _get_idna_encoded_host(host): import idna try: host = idna.encode(host, uts46=True).decode('utf-8') except idna.IDNAError: raise UnicodeError return host def prepare_url(self, url, params): """Prepares the given HTTP URL.""" #: Accept objects that have string representations. #: We're unable to blindly call unicode/str functions #: as this will include the bytestring indicator (b'') #: on python 3.x. #: https://github.com/requests/requests/pull/2238 if isinstance(url, bytes): url = url.decode('utf8') else: url = unicode(url) if is_py2 else str(url) # Remove leading whitespaces from url url = url.lstrip() # Don't do any URL preparation for non-HTTP schemes like `mailto`, # `data` etc to work around exceptions from `url_parse`, which # handles RFC 3986 only. if ':' in url and not url.lower().startswith('http'): self.url = url return # Support for unicode domain names and paths. try: scheme, auth, host, port, path, query, fragment = parse_url(url) except LocationParseError as e: raise InvalidURL(*e.args) if not scheme: error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?") error = error.format(to_native_string(url, 'utf8')) raise MissingSchema(error) if not host: raise InvalidURL("Invalid URL %r: No host supplied" % url) # In general, we want to try IDNA encoding the hostname if the string contains # non-ASCII characters. 
This allows users to automatically get the correct IDNA # behaviour. For strings containing only ASCII characters, we need to also verify # it doesn't start with a wildcard (*), before allowing the unencoded hostname. if not unicode_is_ascii(host): try: host = self._get_idna_encoded_host(host) except UnicodeError: raise InvalidURL('URL has an invalid label.') elif host.startswith(u'*'): raise InvalidURL('URL has an invalid label.') # Carefully reconstruct the network location netloc = auth or '' if netloc: netloc += '@' netloc += host if port: netloc += ':' + str(port) # Bare domains aren't valid URLs. if not path: path = '/' if is_py2: if isinstance(scheme, str): scheme = scheme.encode('utf-8') if isinstance(netloc, str): netloc = netloc.encode('utf-8') if isinstance(path, str): path = path.encode('utf-8') if isinstance(query, str): query = query.encode('utf-8') if isinstance(fragment, str): fragment = fragment.encode('utf-8') if isinstance(params, (str, bytes)): params = to_native_string(params) enc_params = self._encode_params(params) if enc_params: if query: query = '%s&%s' % (query, enc_params) else: query = enc_params url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment])) self.url = url def prepare_headers(self, headers): """Prepares the given HTTP headers.""" self.headers = CaseInsensitiveDict() if headers: for header in headers.items(): # Raise exception on invalid header value. check_header_validity(header) name, value = header self.headers[to_native_string(name)] = value def prepare_body(self, data, files, json=None): """Prepares the given HTTP body data.""" # Check if file, fo, generator, iterator. # If not, run through normal process. # Nottin' on you. body = None content_type = None if not data and json is not None: # urllib3 requires a bytes-like body. Python 2's json.dumps # provides this natively, but Python 3 gives a Unicode string. 
content_type = 'application/json' body = complexjson.dumps(json) if not isinstance(body, bytes): body = body.encode('utf-8') is_stream = all([ hasattr(data, '__iter__'), not isinstance(data, (basestring, list, tuple, collections.Mapping)) ]) try: length = super_len(data) except (TypeError, AttributeError, UnsupportedOperation): length = None if is_stream: body = data if getattr(body, 'tell', None) is not None: # Record the current file position before reading. # This will allow us to rewind a file in the event # of a redirect. try: self._body_position = body.tell() except (IOError, OSError): # This differentiates from None, allowing us to catch # a failed `tell()` later when trying to rewind the body self._body_position = object() if files: raise NotImplementedError('Streamed bodies and files are mutually exclusive.') if length: self.headers['Content-Length'] = builtin_str(length) else: self.headers['Transfer-Encoding'] = 'chunked' else: # Multi-part file uploads. if files: (body, content_type) = self._encode_files(files, data) else: if data: body = self._encode_params(data) if isinstance(data, basestring) or hasattr(data, 'read'): content_type = None else: content_type = 'application/x-www-form-urlencoded' self.prepare_content_length(body) # Add content-type if it wasn't explicitly provided. if content_type and ('content-type' not in self.headers): self.headers['Content-Type'] = content_type self.body = body def prepare_content_length(self, body): """Prepare Content-Length header based on request method and body""" if body is not None: length = super_len(body) if length: # If length exists, set it. Otherwise, we fallback # to Transfer-Encoding: chunked. self.headers['Content-Length'] = builtin_str(length) elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None: # Set Content-Length to 0 for methods that can have a body # but don't provide one. (i.e. 
not GET or HEAD) self.headers['Content-Length'] = '0' def prepare_auth(self, auth, url=''): """Prepares the given HTTP auth data.""" # If no Auth is explicitly provided, extract it from the URL first. if auth is None: url_auth = get_auth_from_url(self.url) auth = url_auth if any(url_auth) else None if auth: if isinstance(auth, tuple) and len(auth) == 2: # special-case basic HTTP auth auth = HTTPBasicAuth(*auth) # Allow auth to make its changes. r = auth(self) # Update self to reflect the auth changes. self.__dict__.update(r.__dict__) # Recompute Content-Length self.prepare_content_length(self.body) def prepare_cookies(self, cookies): """Prepares the given HTTP cookie data. This function eventually generates a ``Cookie`` header from the given cookies using cookielib. Due to cookielib's design, the header will not be regenerated if it already exists, meaning this function can only be called once for the life of the :class:`PreparedRequest ` object. Any subsequent calls to ``prepare_cookies`` will have no actual effect, unless the "Cookie" header is removed beforehand. """ if isinstance(cookies, cookielib.CookieJar): self._cookies = cookies else: self._cookies = cookiejar_from_dict(cookies) cookie_header = get_cookie_header(self._cookies, self) if cookie_header is not None: self.headers['Cookie'] = cookie_header def prepare_hooks(self, hooks): """Prepares the given hooks.""" # hooks can be passed as None to the prepare method and to this # method. To prevent iterating over None, simply use an empty list # if hooks is False-y hooks = hooks or [] for event in hooks: self.register_hook(event, hooks[event]) class Response(object): """The :class:`Response ` object, which contains a server's response to an HTTP request. 
""" __attrs__ = [ '_content', 'status_code', 'headers', 'url', 'history', 'encoding', 'reason', 'cookies', 'elapsed', 'request' ] def __init__(self): self._content = False self._content_consumed = False self._next = None #: Integer Code of responded HTTP Status, e.g. 404 or 200. self.status_code = None #: Case-insensitive Dictionary of Response Headers. #: For example, ``headers['content-encoding']`` will return the #: value of a ``'Content-Encoding'`` response header. self.headers = CaseInsensitiveDict() #: File-like object representation of response (for advanced usage). #: Use of ``raw`` requires that ``stream=True`` be set on the request. # This requirement does not apply for use internally to Requests. self.raw = None #: Final URL location of Response. self.url = None #: Encoding to decode with when accessing r.text. self.encoding = None #: A list of :class:`Response ` objects from #: the history of the Request. Any redirect responses will end #: up here. The list is sorted from the oldest to the most recent request. self.history = [] #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK". self.reason = None #: A CookieJar of Cookies the server sent back. self.cookies = cookiejar_from_dict({}) #: The amount of time elapsed between sending the request #: and the arrival of the response (as a timedelta). #: This property specifically measures the time taken between sending #: the first byte of the request and finishing parsing the headers. It #: is therefore unaffected by consuming the response content or the #: value of the ``stream`` keyword argument. self.elapsed = datetime.timedelta(0) #: The :class:`PreparedRequest ` object to which this #: is a response. self.request = None def __enter__(self): return self def __exit__(self, *args): self.close() def __getstate__(self): # Consume everything; accessing the content attribute makes # sure the content has been fully read. 
if not self._content_consumed: self.content return dict( (attr, getattr(self, attr, None)) for attr in self.__attrs__ ) def __setstate__(self, state): for name, value in state.items(): setattr(self, name, value) # pickled objects do not have .raw setattr(self, '_content_consumed', True) setattr(self, 'raw', None) def __repr__(self): return '' % (self.status_code) def __bool__(self): """Returns True if :attr:`status_code` is less than 400. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code, is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. """ return self.ok def __nonzero__(self): """Returns True if :attr:`status_code` is less than 400. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code, is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. """ return self.ok def __iter__(self): """Allows you to use a response as an iterator.""" return self.iter_content(128) @property def ok(self): """Returns True if :attr:`status_code` is less than 400. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code, is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. """ try: self.raise_for_status() except HTTPError: return False return True @property def is_redirect(self): """True if this Response is a well-formed HTTP redirect that could have been processed automatically (by :meth:`Session.resolve_redirects`). 
""" return ('location' in self.headers and self.status_code in REDIRECT_STATI) @property def is_permanent_redirect(self): """True if this Response one of the permanent versions of redirect.""" return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect)) @property def next(self): """Returns a PreparedRequest for the next request in a redirect chain, if there is one.""" return self._next @property def apparent_encoding(self): """The apparent encoding, provided by the chardet library.""" return chardet.detect(self.content)['encoding'] def iter_content(self, chunk_size=1, decode_unicode=False): """Iterates over the response data. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. The chunk size is the number of bytes it should read into memory. This is not necessarily the length of each item returned as decoding can take place. chunk_size must be of type int or None. A value of None will function differently depending on the value of `stream`. stream=True will read data as it arrives in whatever size the chunks are received. If stream=False, data is returned as a single chunk. If decode_unicode is True, content will be decoded using the best available encoding based on the response. """ def generate(): # Special case for urllib3. if hasattr(self.raw, 'stream'): try: for chunk in self.raw.stream(chunk_size, decode_content=True): yield chunk except ProtocolError as e: raise ChunkedEncodingError(e) except DecodeError as e: raise ContentDecodingError(e) except ReadTimeoutError as e: raise ConnectionError(e) else: # Standard file-like object. 
while True: chunk = self.raw.read(chunk_size) if not chunk: break yield chunk self._content_consumed = True if self._content_consumed and isinstance(self._content, bool): raise StreamConsumedError() elif chunk_size is not None and not isinstance(chunk_size, int): raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size)) # simulate reading small chunks of the content reused_chunks = iter_slices(self._content, chunk_size) stream_chunks = generate() chunks = reused_chunks if self._content_consumed else stream_chunks if decode_unicode: chunks = stream_decode_response_unicode(chunks, self) return chunks def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None): """Iterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. .. note:: This method is not reentrant safe. """ pending = None for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode): if pending is not None: chunk = pending + chunk if delimiter: lines = chunk.split(delimiter) else: lines = chunk.splitlines() if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: pending = lines.pop() else: pending = None for line in lines: yield line if pending is not None: yield pending @property def content(self): """Content of the response, in bytes.""" if self._content is False: # Read the contents. if self._content_consumed: raise RuntimeError( 'The content for this response was already consumed') if self.status_code == 0 or self.raw is None: self._content = None else: self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes() self._content_consumed = True # don't need to release the connection; that's been handled by urllib3 # since we exhausted the data. return self._content @property def text(self): """Content of the response, in unicode. 
If Response.encoding is None, encoding will be guessed using ``chardet``. The encoding of the response content is determined based solely on HTTP headers, following RFC 2616 to the letter. If you can take advantage of non-HTTP knowledge to make a better guess at the encoding, you should set ``r.encoding`` appropriately before accessing this property. """ # Try charset from content-type content = None encoding = self.encoding if not self.content: return str('') # Fallback to auto-detected encoding. if self.encoding is None: encoding = self.apparent_encoding # Decode unicode from given encoding. try: content = str(self.content, encoding, errors='replace') except (LookupError, TypeError): # A LookupError is raised if the encoding was not found which could # indicate a misspelling or similar mistake. # # A TypeError can be raised if encoding is None # # So we try blindly encoding. content = str(self.content, errors='replace') return content def json(self, **kwargs): r"""Returns the json-encoded content of a response, if any. :param \*\*kwargs: Optional arguments that ``json.loads`` takes. :raises ValueError: If the response body does not contain valid json. """ if not self.encoding and self.content and len(self.content) > 3: # No encoding set. JSON RFC 4627 section 3 states we should expect # UTF-8, -16 or -32. Detect which one to use; If the detection or # decoding fails, fall back to `self.text` (using chardet to make # a best guess). encoding = guess_json_utf(self.content) if encoding is not None: try: return complexjson.loads( self.content.decode(encoding), **kwargs ) except UnicodeDecodeError: # Wrong UTF codec detected; usually because it's not UTF-8 # but some other 8-bit codec. This is an RFC violation, # and the server didn't bother to tell us what codec *was* # used. 
pass return complexjson.loads(self.text, **kwargs) @property def links(self): """Returns the parsed header links of the response, if any.""" header = self.headers.get('link') # l = MultiDict() l = {} if header: links = parse_header_links(header) for link in links: key = link.get('rel') or link.get('url') l[key] = link return l def raise_for_status(self): """Raises stored :class:`HTTPError`, if one occurred.""" http_error_msg = '' if isinstance(self.reason, bytes): # We attempt to decode utf-8 first because some servers # choose to localize their reason strings. If the string # isn't utf-8, we fall back to iso-8859-1 for all other # encodings. (See PR #3538) try: reason = self.reason.decode('utf-8') except UnicodeDecodeError: reason = self.reason.decode('iso-8859-1') else: reason = self.reason if 400 <= self.status_code < 500: http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url) elif 500 <= self.status_code < 600: http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url) if http_error_msg: raise HTTPError(http_error_msg, response=self) def close(self): """Releases the connection back to the pool. Once this method has been called the underlying ``raw`` object must not be accessed again. *Note: Should not normally need to be called explicitly.* """ if not self._content_consumed: self.raw.close() release_conn = getattr(self.raw, 'release_conn', None) if release_conn is not None: release_conn() PK.e[2ۄWWrequests/cookies.pyonu[ abc@sQdZddlZddlZddlZddlZddlmZddlmZm Z m Z m Z yddl Z Wne k rddlZ nXdefdYZdefd YZd Zd Zddd Zd efdYZdejejfdYZdZdZdZdedZdZ dS(s requests.cookies ~~~~~~~~~~~~~~~~ Compatibility code to be able to use `cookielib.CookieJar` with requests. requests.utils imports from here, so be careful with imports. iNi(tto_native_string(t cookielibturlparset urlunparsetMorselt MockRequestcBseZdZdZdZdZdZdZdZdZ ddZ d Z d Z d Zed Zed ZedZRS(sWraps a `requests.Request` to mimic a `urllib2.Request`. 
The code in `cookielib.CookieJar` expects this interface in order to correctly manage cookie policies, i.e., determine whether a cookie can be set, given the domains of the request and the cookie. The original request object is read-only. The client is responsible for collecting the new headers via `get_new_headers()` and interpreting them appropriately. You probably want `get_cookie_header`, defined below. cCs.||_i|_t|jjj|_dS(N(t_rt _new_headersRturltschemettype(tselftrequest((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt__init__&s  cCs|jS(N(R (R ((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pytget_type+scCst|jjjS(N(RRRtnetloc(R ((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pytget_host.scCs |jS(N(R(R ((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pytget_origin_req_host1scCsx|jjjds|jjSt|jjddd}t|jj}t|j||j|j |j |j gS(NtHosttencodingsutf-8( RtheaderstgetRRRRR tpathtparamstquerytfragment(R thosttparsed((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt get_full_url4s cCstS(N(tTrue(R ((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pytis_unverifiableBscCs||jjkp||jkS(N(RRR(R tname((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt has_headerEscCs%|jjj||jj||S(N(RRRR(R Rtdefault((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt get_headerHscCstddS(sMcookielib has no legitimate use for this method; add it back if you find one.s=Cookie headers should be added with add_unredirected_header()N(tNotImplementedError(R tkeytval((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt add_headerKscCs||j|(RR'RQtresulttbadargsterr((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyROs0   cCs!d}|dr_y$ttjt|d}Wqtk r[td|dqXn2|drd}tjtj|d|}ntd|ddt |ddt d|dd|d |j d |d d dd i|d d6dt dt |dd|j d|dpd S(sBConvert a Morsel object into a Cookie containing the one k/v pair.smax-agesmax-age: %s must be integerRs%a, 
%d-%b-%Y %H:%M:%S GMTRRRRBRRRRthttponlyRRRR'RiN( R/tintttimet ValueErrorRtcalendarttimegmtstrptimeRORR`R$R'(tmorselRt time_template((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyRNs0 $       cCs|dkrt}n|dk rg|D]}|j^q+}x@|D]5}|s_||krG|jt|||qGqGWn|S(s-Returns a CookieJar from a key/value dictionary. :param cookie_dict: Dict of key/values to insert into CookieJar. :param cookiejar: (optional) A cookiejar to add the cookies to. :param overwrite: (optional) If False, will not replace cookies already in the jar with new ones. N(R/RJRRPRO(t cookie_dictREt overwriteRGtnames_from_jarR((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pytcookiejar_from_dicts    $cCst|tjs!tdnt|trKt|d|dt}nXt|tjry|j|Wqtk rx|D]}|j |qWqXn|S(sAdd cookies to cookiejar and returns a merged CookieJar. :param cookiejar: CookieJar object to add the cookies to. :param cookies: Dictionary or CookieJar object to be added. s!You can only merge into CookieJarRER( RMRRoRRRR`RqtAttributeErrorRP(REtcookiest cookie_in_jar((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt merge_cookies s  (!R.RpRRt collectionst_internal_utilsRtcompatRRRRRzt ImportErrortdummy_threadingtobjectRR1R=RAR/RHt RuntimeErrorRIRotMutableMappingRJRRORNRRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyt s,    " H    # PK.e[Nrequests/exceptions.pyonu[ abc@sdZddlmZdefdYZdefdYZdefdYZd efd YZd efd YZd efdYZ dee fdYZ de fdYZ defdYZ defdYZ deefdYZdeefdYZdeefdYZdeefdYZdefd YZd!eefd"YZd#eefd$YZd%efd&YZd'efd(YZd)efd*YZd+eefd,YZd-efd.YZd/S(0s` requests.exceptions ~~~~~~~~~~~~~~~~~~~ This module contains the set of Requests' exceptions. i(t HTTPErrortRequestExceptioncBseZdZdZRS(sTThere was an ambiguous exception that occurred while handling your request. 
cOs|jdd}||_|jdd|_|dk rg|j rgt|drg|jj|_ntt|j||dS(sBInitialize RequestException with `request` and `response` objects.tresponsetrequestN(tpoptNoneRRthasattrtsuperRt__init__(tselftargstkwargsR((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRs (t__name__t __module__t__doc__R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR sRcBseZdZRS(sAn HTTP error occurred.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRstConnectionErrorcBseZdZRS(sA Connection error occurred.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR st ProxyErrorcBseZdZRS(sA proxy error occurred.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR$stSSLErrorcBseZdZRS(sAn SSL error occurred.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR(stTimeoutcBseZdZRS(sThe request timed out. Catching this error will catch both :exc:`~requests.exceptions.ConnectTimeout` and :exc:`~requests.exceptions.ReadTimeout` errors. (R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR,stConnectTimeoutcBseZdZRS(sThe request timed out while trying to connect to the remote server. Requests that produced this error are safe to retry. (R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR5st ReadTimeoutcBseZdZRS(s@The server did not send any data in the allotted amount of time.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR<st URLRequiredcBseZdZRS(s*A valid URL is required to make a request.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR@stTooManyRedirectscBseZdZRS(sToo many redirects.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRDst MissingSchemacBseZdZRS(s/The URL schema (e.g. 
http or https) is missing.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRHst InvalidSchemacBseZdZRS(s"See defaults.py for valid schemas.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRLst InvalidURLcBseZdZRS(s%The URL provided was somehow invalid.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRPst InvalidHeadercBseZdZRS(s.The header value provided was somehow invalid.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRTstChunkedEncodingErrorcBseZdZRS(s?The server declared chunked encoding but sent an invalid chunk.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRXstContentDecodingErrorcBseZdZRS(s!Failed to decode response content(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR\stStreamConsumedErrorcBseZdZRS(s2The content for this response was already consumed(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR`st RetryErrorcBseZdZRS(sCustom retries logic failed(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRdstUnrewindableBodyErrorcBseZdZRS(s:Requests encountered an error when trying to rewind a body(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyRhstRequestsWarningcBseZdZRS(sBase warning for Requests.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR nstFileModeWarningcBseZdZRS(sJA file was opened in text mode, but Requests determined its binary length.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR!sstRequestsDependencyWarningcBseZdZRS(s@An imported dependency doesn't match the expected version range.(R R R(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyR"xsN(Rtpip._vendor.urllib3.exceptionsRt BaseHTTPErrortIOErrorRRRRRRRRRt ValueErrorRRRRRRt TypeErrorRRRtWarningR 
tDeprecationWarningR!R"(((sC/usr/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyts. PK.e[A8 requests/status_codes.pynu[# -*- coding: utf-8 -*- from .structures import LookupDict _codes = { # Informational. 100: ('continue',), 101: ('switching_protocols',), 102: ('processing',), 103: ('checkpoint',), 122: ('uri_too_long', 'request_uri_too_long'), 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'), 201: ('created',), 202: ('accepted',), 203: ('non_authoritative_info', 'non_authoritative_information'), 204: ('no_content',), 205: ('reset_content', 'reset'), 206: ('partial_content', 'partial'), 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'), 208: ('already_reported',), 226: ('im_used',), # Redirection. 300: ('multiple_choices',), 301: ('moved_permanently', 'moved', '\\o-'), 302: ('found',), 303: ('see_other', 'other'), 304: ('not_modified',), 305: ('use_proxy',), 306: ('switch_proxy',), 307: ('temporary_redirect', 'temporary_moved', 'temporary'), 308: ('permanent_redirect', 'resume_incomplete', 'resume',), # These 2 to be removed in 3.0 # Client Error. 
400: ('bad_request', 'bad'), 401: ('unauthorized',), 402: ('payment_required', 'payment'), 403: ('forbidden',), 404: ('not_found', '-o-'), 405: ('method_not_allowed', 'not_allowed'), 406: ('not_acceptable',), 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'), 408: ('request_timeout', 'timeout'), 409: ('conflict',), 410: ('gone',), 411: ('length_required',), 412: ('precondition_failed', 'precondition'), 413: ('request_entity_too_large',), 414: ('request_uri_too_large',), 415: ('unsupported_media_type', 'unsupported_media', 'media_type'), 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'), 417: ('expectation_failed',), 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'), 421: ('misdirected_request',), 422: ('unprocessable_entity', 'unprocessable'), 423: ('locked',), 424: ('failed_dependency', 'dependency'), 425: ('unordered_collection', 'unordered'), 426: ('upgrade_required', 'upgrade'), 428: ('precondition_required', 'precondition'), 429: ('too_many_requests', 'too_many'), 431: ('header_fields_too_large', 'fields_too_large'), 444: ('no_response', 'none'), 449: ('retry_with', 'retry'), 450: ('blocked_by_windows_parental_controls', 'parental_controls'), 451: ('unavailable_for_legal_reasons', 'legal_reasons'), 499: ('client_closed_request',), # Server Error. 
500: ('internal_server_error', 'server_error', '/o\\', '✗'), 501: ('not_implemented',), 502: ('bad_gateway',), 503: ('service_unavailable', 'unavailable'), 504: ('gateway_timeout',), 505: ('http_version_not_supported', 'http_version'), 506: ('variant_also_negotiates',), 507: ('insufficient_storage',), 509: ('bandwidth_limit_exceeded', 'bandwidth'), 510: ('not_extended',), 511: ('network_authentication_required', 'network_auth', 'network_authentication'), } codes = LookupDict(name='status_codes') for code, titles in _codes.items(): for title in titles: setattr(codes, title, code) if not title.startswith(('\\', '/')): setattr(codes, title.upper(), code) PK.e[E=&&requests/auth.pycnu[ abc@sdZddlZddlZddlZddlZddlZddlZddlmZddl m Z m Z m Z ddl mZddlmZddlmZd Zd Zd Zd efd YZdefdYZdefdYZdefdYZdS(s] requests.auth ~~~~~~~~~~~~~ This module contains the authentication handlers for Requests. iN(t b64encodei(turlparsetstrt basestring(textract_cookies_to_jar(tto_native_string(tparse_dict_headers!application/x-www-form-urlencodedsmultipart/form-datacCst|ts:tjdj|dtt|}nt|tsttjdj|dtt|}nt|tr|jd}nt|tr|jd}ndtt dj ||fj }|S(sReturns a Basic Auth string.sNon-string usernames will no longer be supported in Requests 3.0.0. Please convert the object you've passed in ({0!r}) to a string or bytes object in the near future to avoid problems.tcategorysNon-string passwords will no longer be supported in Requests 3.0.0. 
Please convert the object you've passed in ({0!r}) to a string or bytes object in the near future to avoid problems.tlatin1sBasic t:( t isinstanceRtwarningstwarntformattDeprecationWarningRtencodeRRtjointstrip(tusernametpasswordtauthstr((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyt_basic_auth_strs&   %tAuthBasecBseZdZdZRS(s4Base class that all auth implementations derive fromcCstddS(NsAuth hooks must be callable.(tNotImplementedError(tselftr((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyt__call__Ks(t__name__t __module__t__doc__R(((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyRHst HTTPBasicAuthcBs2eZdZdZdZdZdZRS(s?Attaches HTTP Basic Authentication to the given Request object.cCs||_||_dS(N(RR(RRR((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyt__init__Rs cCs:t|jt|ddk|jt|ddkgS(NRR(tallRtgetattrtNoneR(Rtother((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyt__eq__VscCs ||k S(N((RR#((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyt__ne__\scCs t|j|j|jd<|S(Nt Authorization(RRRtheaders(RR((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyR_s(RRRRR$R%R(((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyROs    t HTTPProxyAuthcBseZdZdZRS(s=Attaches HTTP Proxy Authentication to a given Request object.cCs t|j|j|jd<|S(NsProxy-Authorization(RRRR'(RR((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyRgs(RRRR(((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyR(dstHTTPDigestAuthcBsVeZdZdZdZdZdZdZdZdZ dZ RS( s@Attaches HTTP Digest Authentication to the given Request object.cCs%||_||_tj|_dS(N(RRt threadingtlocalt _thread_local(RRR((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyRos  cCsat|jds]t|j_d|j_d|j_i|j_d|j_d|j_ ndS(Ntinitti( thasattrR,tTrueR-t last_noncet nonce_counttchalR"tpost num_401_calls(R((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pytinit_per_thread_stateus     
csN|jjd}|jjd}|jjjd}|jjjd}|jjjd}d|dkrzd}n |j}|dks|dkrd} | n|d krd } | nfd } dkrdSd} t|} | jp d }| jr+|d | j7}nd|j||j f}d||f}|}|}||jj kr|jj d7_ n d|j_ d|jj }t |jj j d}||j d7}|tjj d7}|tjd7}tj|jd }|dkrJd|||f}n|sl| |d||f}nP|dksd|jdkrd|||d|f}| ||}ndS||j_ d|j||||f}|r|d|7}n|r|d|7}n| r)|d| 7}n|rF|d||f7}nd|S(s :rtype: str trealmtnoncetqopt algorithmtopaquetMD5sMD5-SESScSs4t|tr!|jd}ntj|jS(Nsutf-8(R RRthashlibtmd5t hexdigest(tx((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pytmd5_utf8stSHAcSs4t|tr!|jd}ntj|jS(Nsutf-8(R RRR=tsha1R?(R@((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pytsha_utf8scsd||fS(Ns%s:%s((tstd(t hash_utf8(s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pytR.t/t?s%s:%s:%ss%s:%sis%08xsutf-8iitautht,s%s:%s:%s:%s:%ss>username="%s", realm="%s", nonce="%s", uri="%s", response="%s"s , opaque="%s"s, algorithm="%s"s , digest="%s"s , qop="auth", nc=%s, cnonce="%s"s Digest %sN(R,R3tgetR"tupperRtpathtqueryRRR1R2RRttimetctimetosturandomR=RCR?tsplit(RtmethodturlR7R8R9R:R;t _algorithmRARDtKDtentdigtp_parsedROtA1tA2tHA1tHA2tncvalueREtcnoncetrespdigtnoncebittbase((RGs=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pytbuild_digest_headersr               ! cKs|jrd|j_ndS(s)Reset num_401_calls counter on redirects.iN(t is_redirectR,R5(RRtkwargs((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pythandle_redirects cKsd|jkodkns/d|j_|S|jjd k r]|jjj|jjn|jj dd}d|j kr~|jjdkr~|jjd7_t j dd t j }t|jd|d d|j_|j|j|jj}t|j|j|j|j|j|j|j|j|jd <|jj||}|jj|||_|Sd|j_|S( so Takes the given response and tries digest-auth, if needed. :rtype: requests.Response iiiswww-authenticateR.tdigestisdigest tflagstcountR&N(t status_codeR,R5R4R"trequesttbodytseekR'RMtlowertretcompilet IGNORECASERtsubR3tcontenttclosetcopyRt_cookiestrawtprepare_cookiesReRVRWt connectiontsendthistorytappend(RRRgts_authtpattprept_r((s=/usr/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyt handle_401s.  
$$   cCs|j|jjr8|j|j|j|jds$       ,PK.e[waMZZrequests/compat.pynu[# -*- coding: utf-8 -*- """ requests.compat ~~~~~~~~~~~~~~~ This module handles import compatibility issues between Python 2 and Python 3. """ from pip._vendor import chardet import sys # ------- # Pythons # ------- # Syntax sugar. _ver = sys.version_info #: Python 2.x? is_py2 = (_ver[0] == 2) #: Python 3.x? is_py3 = (_ver[0] == 3) # try: # import simplejson as json # except ImportError: import json # --------- # Specifics # --------- if is_py2: from urllib import ( quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment) from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag from urllib2 import parse_http_list import cookielib from Cookie import Morsel from StringIO import StringIO from pip._vendor.urllib3.packages.ordered_dict import OrderedDict builtin_str = str bytes = str str = unicode basestring = basestring numeric_types = (int, long, float) integer_types = (int, long) elif is_py3: from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment from http import cookiejar as cookielib from http.cookies import Morsel from io import StringIO from collections import OrderedDict builtin_str = str str = str bytes = bytes basestring = (str, bytes) numeric_types = (int, float) integer_types = (int,) PK.e[0 requests/__init__.pynu[# -*- coding: utf-8 -*- # __ # /__) _ _ _ _ _/ _ # / ( (- (/ (/ (- _) / _) # / """ Requests HTTP Library ~~~~~~~~~~~~~~~~~~~~~ Requests is an HTTP library, written in Python, for human beings. Basic GET usage: >>> import requests >>> r = requests.get('https://www.python.org') >>> r.status_code 200 >>> 'Python is a programming language' in r.content True ... 
or POST: >>> payload = dict(key1='value1', key2='value2') >>> r = requests.post('http://httpbin.org/post', data=payload) >>> print(r.text) { ... "form": { "key2": "value2", "key1": "value1" }, ... } The other HTTP methods are supported - see `requests.api`. Full documentation is at . :copyright: (c) 2017 by Kenneth Reitz. :license: Apache 2.0, see LICENSE for more details. """ from pip._vendor import urllib3 from pip._vendor import chardet import warnings from .exceptions import RequestsDependencyWarning def check_compatibility(urllib3_version, chardet_version): urllib3_version = urllib3_version.split('.') assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git. # Sometimes, urllib3 only reports its version as 16.1. if len(urllib3_version) == 2: urllib3_version.append('0') # Check urllib3 for compatibility. major, minor, patch = urllib3_version # noqa: F811 major, minor, patch = int(major), int(minor), int(patch) # urllib3 >= 1.21.1, <= 1.22 assert major == 1 assert minor >= 21 assert minor <= 22 # Check chardet for compatibility. major, minor, patch = chardet_version.split('.')[:3] major, minor, patch = int(major), int(minor), int(patch) # chardet >= 3.0.2, < 3.1.0 assert major == 3 assert minor < 1 assert patch >= 2 # Check imported dependencies for compatibility. try: check_compatibility(urllib3.__version__, chardet.__version__) except (AssertionError, ValueError): warnings.warn("urllib3 ({0}) or chardet ({1}) doesn't match a supported " "version!".format(urllib3.__version__, chardet.__version__), RequestsDependencyWarning) # Attempt to enable urllib3's SNI support, if possible # try: # from pip._vendor.urllib3.contrib import pyopenssl # pyopenssl.inject_into_urllib3() # except ImportError: # pass # urllib3's DependencyWarnings should be silenced. 
from pip._vendor.urllib3.exceptions import DependencyWarning warnings.simplefilter('ignore', DependencyWarning) from .__version__ import __title__, __description__, __url__, __version__ from .__version__ import __build__, __author__, __author_email__, __license__ from .__version__ import __copyright__, __cake__ from . import utils from . import packages from .models import Request, Response, PreparedRequest from .api import request, get, head, post, patch, put, delete, options from .sessions import session, Session from .status_codes import codes from .exceptions import ( RequestException, Timeout, URLRequired, TooManyRedirects, HTTPError, ConnectionError, FileModeWarning, ConnectTimeout, ReadTimeout ) # Set default logging handler to avoid "No handler found" warnings. import logging try: # Python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler()) # FileModeWarnings go off per the default. warnings.simplefilter('default', FileModeWarning, append=True) PK.e[ ;requests/hooks.pyonu[ abc@s%dZdgZdZdZdS(s requests.hooks ~~~~~~~~~~~~~~ This module provides the capabilities for the Requests hooks system. Available hooks: ``response``: The response generated from a Request. 
tresponsecCstdtDS(Ncss|]}|gfVqdS(N((t.0tevent((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/hooks.pys s(tdicttHOOKS(((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/hooks.pyt default_hooksscKs{|p t}|j|}|rwt|dr?|g}nx5|D]*}|||}|dk rF|}qFqFWn|S(s6Dispatches a hook dictionary on a given piece of data.t__call__N(RtgetthasattrtNone(tkeythookst hook_datatkwargsthookt _hook_data((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/hooks.pyt dispatch_hooks   N(t__doc__RRR(((s>/usr/lib/python2.7/site-packages/pip/_vendor/requests/hooks.pyt s  PK.e[h\h// ipaddress.pycnu[ abc@sdZddlmZddlZddlZdZefZyeefZWne k r`nXy e Z Wn)e k re Z e e kstnXdddkrdZn dZy ejZWnek rd ZnXd Zeed r d Zn d ZddZdefdYZdZdZdefdYZdefdYZdZedZ dZ!dZ"dZ#dZ$dZ%dZ&d Z'd!Z(d"Z)d#Z*d$efd%YZ+d&e+fd'YZ,d(e+fd)YZ-d*efd+YZ.d,e.e,fd-YZ/d.e/fd/YZ0d0e.e-fd1YZ1d2efd3YZ2e2e/_3d4efd5YZ4d6e4e,fd7YZ5d8e5fd9YZ6d:e4e-fd;YZ7d<efd=YZ8e8e5_3dS(>uA fast, lightweight IPv4/IPv6 manipulation library in Python. This library is used to create/poke/manipulate IPv4 and IPv6 addresses and networks. 
i(tunicode_literalsNu1.0.17sicCs|S(N((tbyt((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_compat_bytes_to_byte_valsscCs'g|D]}tjd|d^qS(Ns!Bi(tstructtunpack(Rtb((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR"scCsP|dkstd}x1|D])}t|ts:t|d>|}qW|S(Nubigii(tAssertionErrort isinstancet_compat_int_types(tbytvalst endianesstrestbv((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_compat_int_from_byte_vals's  cCst|tst|dks't|dkrm|dksK|dkr]tjdntjd|S|dkr|dks|dkrtjd ntjd |d ?|d @StdS(Nubigiiii u(integer out of range for 'I' format codes!Iiiu)integer out of range for 'QQ' format codes!QQi@lIl (RRRRterrortpacktNotImplementedError(tintvaltlengthR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_compat_to_bytes0s  u bit_lengthcCs |jS(N(t bit_length(ti((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_compat_bit_length?scCs/x(tjD]}||?dkr |Sq WdS(Ni(t itertoolstcount(RR ((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRBsiccs>|dkst|}x||kr9|V||7}qWdS(Ni(R(tstarttendtstepR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt _compat_rangeHs t_TotalOrderingMixincBsDeZdZdZdZdZdZdZdZRS(cCs tdS(N(R(tselftother((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__eq__WscCs$|j|}|tkrtS| S(N(R tNotImplemented(RRtequal((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__ne__Zs cCs tdS(N(R(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__lt__`scCs3|j|}|tks"| r/|j|S|S(N(R$R!R (RRtless((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__le__cs cCsI|j|}|tkrtS|j|}|tkr>tS|pG| S(N(R$R!R (RRR%R"((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__gt__is  cCs$|j|}|tkrtS| S(N(R$R!(RRR%((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__ge__rs (( t__name__t __module__t __slots__R R#R$R&R'R((((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRPs     i itAddressValueErrorcBseZdZRS(u%A Value Error related to the 
address.(R)R*t__doc__(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR,}stNetmaskValueErrorcBseZdZRS(u%A Value Error related to the netmask.(R)R*R-(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR.scCsyt|SWnttfk r'nXyt|SWnttfk rOnXt|trrtd|ntd|dS(uTake an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP address. Either IPv4 or IPv6 addresses may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Address or IPv6Address object. Raises: ValueError: if the *address* passed isn't either a v4 or a v6 address ux%r does not appear to be an IPv4 or IPv6 address. Did you pass in a bytes (str in Python 2) instead of a unicode object?u0%r does not appear to be an IPv4 or IPv6 addressN(t IPv4AddressR,R.t IPv6AddressRtbytest ValueError(taddress((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt ip_addresss cCsyt||SWnttfk r*nXyt||SWnttfk rUnXt|trxtd|ntd|dS(uTake an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP network. Either IPv4 or IPv6 networks may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Network or IPv6Network object. Raises: ValueError: if the string passed isn't either a v4 or a v6 address. Or if the network has host bits set. ux%r does not appear to be an IPv4 or IPv6 network. Did you pass in a bytes (str in Python 2) instead of a unicode object?u0%r does not appear to be an IPv4 or IPv6 networkN(t IPv4NetworkR,R.t IPv6NetworkRR1R2(R3tstrict((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt ip_networks cCsdyt|SWnttfk r'nXyt|SWnttfk rOnXtd|dS(ugTake an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP address. Either IPv4 or IPv6 addresses may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Interface or IPv6Interface object. 
Raises: ValueError: if the string passed isn't either a v4 or a v6 address. Notes: The IPv?Interface classes describe an Address on a particular Network, so they're basically a combination of both the Address and Network classes. u2%r does not appear to be an IPv4 or IPv6 interfaceN(t IPv4InterfaceR,R.t IPv6InterfaceR2(R3((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt ip_interfacescCsAyt|ddSWn&tjtfk r<tdnXdS(u`Represent an address as 4 packed bytes in network (big-endian) order. Args: address: An integer representation of an IPv4 IP address. Returns: The integer address packed as 4 bytes in network (big-endian) order. Raises: ValueError: If the integer is negative or too large to be an IPv4 IP address. iubigu&Address negative or too large for IPv4N(RRRt OverflowErrorR2(R3((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pytv4_int_to_packedscCsAyt|ddSWn&tjtfk r<tdnXdS(uRepresent an address as 16 packed bytes in network (big-endian) order. Args: address: An integer representation of an IPv6 IP address. Returns: The integer address packed as 16 bytes in network (big-endian) order. iubigu&Address negative or too large for IPv6N(RRRR<R2(R3((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pytv6_int_to_packeds cCs>t|jd}t|dkr:td|n|S(uAHelper to split the netmask and raise AddressValueError if neededu/iuOnly one '/' permitted in %r(t _compat_strtsplittlenR,(R3taddr((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_split_optional_netmasksccslt|}t|}}x>|D]6}|j|jdkrS||fV|}n|}q#W||fVdS(uFind a sequence of sorted deduplicated IPv#Address. Args: addresses: a list of IPv#Address objects. Yields: A tuple containing the first and last IP addresses in the sequence. iN(titertnextt_ip(t addressestittfirsttlasttip((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_find_address_ranges     cCs,|dkr|St|t||d@S(uCount the number of zero bits on the right hand side. Args: number: an integer. bits: maximum number of bits to count. 
Returns: The number of zero bits on the right hand side of the number. ii(tminR(tnumbertbits((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_count_righthand_zero_bits0s ccsKt|tot|ts-tdn|j|jkrXtd||fn||krstdn|jdkrt}n$|jdkrt}n td|j}|j}|j}xz||krFt t ||t ||dd}||||f}|V|d|>7}|d|j krPqqWdS( uSummarize a network range given the first and last IP addresses. Example: >>> list(summarize_address_range(IPv4Address('192.0.2.0'), ... IPv4Address('192.0.2.130'))) ... #doctest: +NORMALIZE_WHITESPACE [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), IPv4Network('192.0.2.130/32')] Args: first: the first IPv4Address or IPv6Address in the range. last: the last IPv4Address or IPv6Address in the range. Returns: An iterator of the summarized IPv(4|6) network objects. Raise: TypeError: If the first and last objects are not IP addresses. If the first and last objects are not the same version. ValueError: If the last object is not greater than the first. If the version of the first address is not 4 or 6. u1first and last must be IP addresses, not networksu%%s and %s are not of the same versionu*last IP address must be greater than firstiiuunknown IP versioniN( Rt _BaseAddresst TypeErrortversionR2R5R6t_max_prefixlenRFRMRPRt _ALL_ONES(RIRJRKtip_bitst first_inttlast_inttnbitstnet((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pytsummarize_address_range@s0       ccst|}i}xm|r|j}|j}|j|}|dkr[||| [IPv4Network('192.0.2.0/24')] This shouldn't be called directly; it is called via collapse_addresses([]). Args: addresses: A list of IPv4Network's or IPv6Network's Returns: A list of IPv4Network's or IPv6Network's depending on what we were passed. 
N( tlisttpoptsupernettgettNonetappendtsortedtvaluestbroadcast_address(RGtto_mergetsubnetsRZR^texistingRJ((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_collapse_addresses_internalws$         cCsg}g}g}x2|D]*}t|trw|rg|dj|jkrgtd||dfn|j|q|j|jkr|r|dj|jkrtd||dfny|j|jWqCtk r|j|j qCXq|r6|dj|jkr6td||dfn|j|qWt t |}|rx3t |D]"\}}|j t||qlWnt||S(uCollapse a list of IP objects. Example: collapse_addresses([IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]) -> [IPv4Network('192.0.2.0/24')] Args: addresses: An iterator of IPv4Network or IPv6Network objects. Returns: An iterator of the collapsed IPv(4|6)Network objects. Raises: TypeError: If passed a list of mixed version objects. iu%%s and %s are not of the same version(RRQt_versionRRRat _prefixlenRTRKtAttributeErrortnetwork_addressRbtsetRLtextendR[Rh(RGtaddrstipstnetsRKRIRJ((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pytcollapse_addressess4  cCs6t|tr|jSt|tr2|jStS(u2Return a key suitable for sorting between networks and addresses. Address and Network objects are not sortable by default; they're fundamentally different so the expression IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') doesn't make any sense. There are some times however, where you may wish to have ipaddress sort these for you anyway. If you need to do this, you can use this function as the key= argument to sorted(). Args: obj: either a Network or Address object. Returns: appropriate key. 
(Rt _BaseNetworkt_get_networks_keyRQt_get_address_keyR!(tobj((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pytget_mixed_type_keys   t_IPAddressBasecBseZdZd ZedZedZedZedZdZ dZ e dZ e dZ e d Ze d Ze d Zd ZRS(uThe mother class.cCs |jS(u:Return the longhand version of the IP address as a string.(t_explode_shorthand_ip_string(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pytexplodedscCs t|S(u;Return the shorthand version of the IP address as a string.(R?(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt compressedscCs |jS(uIThe name of the reverse DNS pointer for the IP address, e.g.: >>> ipaddress.ip_address("127.0.0.1").reverse_pointer '1.0.0.127.in-addr.arpa' >>> ipaddress.ip_address("2001:db8::1").reverse_pointer '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' (t_reverse_pointer(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pytreverse_pointers cCs#dt|f}t|dS(Nu%200s has no version specified(ttypeR(Rtmsg((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRSscCsi|dkr.d}t|||jfn||jkred}t|||j|jfndS(Niu-%d (< 0) is not permitted as an IPv%d addressu2%d (>= 2**%d) is not permitted as an IPv%d address(R,RiRURT(RR3R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_check_int_addresss cCsDt|}||kr@d}t|||||jfndS(Nu~%r (len %d != %d) is not permitted as an IPv%d address. Did you pass in a bytes (str in Python 2) instead of a unicode object?(RAR,Ri(RR3t expected_lent address_lenR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_check_packed_address s    cCs|j|j|?AS(uTurn the prefix length into a bitwise netmask Args: prefixlen: An integer, the prefix length. Returns: An integer. (RU(tclst prefixlen((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_ip_int_from_prefix+s c Cst||j}|j|}||?}d|>d}||kr{|jd}t||d}d}t||n|S(uReturn prefix length from the bitwise netmask. 
Args: ip_int: An integer, the netmask in expanded bitwise format Returns: An integer, the prefix length. Raises: ValueError: If the input intermingles zeroes & ones iiubigu&Netmask pattern %r mixes zeroes & ones(RPRTRR2( Rtip_intttrailing_zeroesRt leading_onestall_onestbyteslentdetailsR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_prefix_from_ip_int8s      cCsd|}t|dS(Nu%r is not a valid netmask(R.(Rt netmask_strR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_report_invalid_netmaskQs cCstjj|s"|j|nyt|}Wntk rR|j|nXd|kom|jkns|j|n|S(u Return prefix length from a numeric string Args: prefixlen_str: The string to be converted Returns: An integer, the prefix length. Raises: NetmaskValueError: If the input is not a valid netmask i(t_BaseV4t_DECIMAL_DIGITSt issupersetRtintR2RT(Rt prefixlen_strR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_prefix_from_prefix_stringVs cCsy|j|}Wntk r3|j|nXy|j|SWntk rXnX||jN}y|j|SWntk r|j|nXdS(uTurn a netmask/hostmask string into a prefix length Args: ip_str: The netmask/hostmask to be converted Returns: An integer, the prefix length. Raises: NetmaskValueError: If the input is not a valid netmask/hostmask N(t_ip_int_from_stringR,RRR2RU(Rtip_strR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_prefix_from_ip_stringos    cCs|jt|ffS(N(t __class__R?(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt __reduce__s((R)R*R-R+tpropertyRzR{R}RSRRt classmethodRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRxs   "RQcBsneZdZd ZdZdZdZdZdZdZ dZ dZ d Z d Z RS( uA generic IP object. This IP class contains the version independent methods which are used by single IP addresses. 
cCs|jS(N(RF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__int__scCs?y&|j|jko$|j|jkSWntk r:tSXdS(N(RFRiRkR!(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR s  cCst|tstSt|ts;td||fn|j|jkrftd||fn|j|jkr|j|jkStS(Nu"%s and %s are not of the same typeu%%s and %s are not of the same version(RRxR!RQRRRiRFtFalse(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR$scCs*t|tstS|jt||S(N(RRR!RR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__add__scCs*t|tstS|jt||S(N(RRR!RR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__sub__scCsd|jjt|fS(Nu%s(%r)(RR)R?(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__repr__scCst|j|jS(N(R?t_string_from_ip_intRF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__str__scCsttt|jS(N(thashthexRRF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__hash__scCs |j|fS(N(Ri(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRuscCs|j|jffS(N(RRF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs((R)R*R-R+RR R$RRRRRRuR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRQs         RscBseZdZdZdZdZdZdZdZdZ dZ d Z d Z d Z ed Zed ZedZedZedZedZedZedZdZdZdZdd#dZdd#dZedZdZdZedZ edZ!edZ"ed Z#ed!Z$ed"Z%RS($u~A generic IP network object. This IP class contains the version independent methods which are used by networks. cCs i|_dS(N(t_cache(RR3((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__init__scCsd|jjt|fS(Nu%s(%r)(RR)R?(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCsd|j|jfS(Nu%s/%d(RlR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRsccsNt|j}t|j}x)t|d|D]}|j|Vq2WdS(uGenerate Iterator over usable hosts in a network. This is like __iter__ except it doesn't return the network or broadcast addresses. 
iN(RRlRdRt_address_class(Rtnetworkt broadcasttx((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pythostssccsNt|j}t|j}x)t||dD]}|j|Vq2WdS(Ni(RRlRdRR(RRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt__iter__scCst|j}t|j}|dkrZ|||krItdn|j||S|d7}|||krtdn|j||SdS(Niuaddress out of rangei(RRlRdt IndexErrorR(RtnRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt __getitem__s  cCst|tstSt|ts;td||fn|j|jkrftd||fn|j|jkr|j|jkS|j|jkr|j|jkStS(Nu"%s and %s are not of the same typeu%%s and %s are not of the same version( RRxR!RsRRRiRltnetmaskR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR$scCs]yD|j|jkoB|j|jkoBt|jt|jkSWntk rXtSXdS(N(RiRlRRRkR!(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR s   cCs tt|jt|jAS(N(RRRlR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCs`|j|jkrtSt|tr)tSt|jt|jkoYt|jkSSdS(N(RiRRRsRRlRFRd(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt __contains__s  cCs:|j|kp9|j|kp9|j|kp9|j|kS(u*Tell if self is partly contained in other.(RlRd(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pytoverlaps)scCsW|jjd}|dkrS|jt|jt|jB}||jd other eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') IPv6Network('2001:db8::2000/124') > IPv6Network('2001:db8::1000/124') Raises: TypeError if the IP versions are different. u"%s and %s are not of the same typeiii(RiRRRlR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pytcompare_networkss!cCs|j|j|jfS(uNetwork-only key function. Returns an object that identifies this address' network and netmask. This function is a suitable "key" argument for sorted() and list.sort(). (RiRlR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRtsic cs4|j|jkr|VdS|dk rp||jkrEtdn|dkr`tdn||j}n|dkrtdn|j|}||jkrtd||fnt|j}t|jd}t|jd|?}x4t|||D] }|j ||f}|Vq WdS(uThe subnets which join to make the current subnet. 
In the case that self contains only one IP (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 for IPv6), yield an iterator with just ourself. Args: prefixlen_diff: An integer, the amount the prefix length should be increased by. This should not be set if new_prefix is also set. new_prefix: The desired new prefix length. This must be a larger number (smaller prefix) than the existing prefix. This should not be set if prefixlen_diff is also set. Returns: An iterator of IPv(4|6) objects. Raises: ValueError: The prefixlen_diff is too small or too large. OR prefixlen_diff and new_prefix are both set or new_prefix is a smaller number than the current prefix (smaller number means a larger network) Nunew prefix must be longeriu(cannot set prefixlen_diff and new_prefixiuprefix length diff must be > 0u0prefix length diff %d is invalid for netblock %s( RjRTR`R2RRlRdRRR( Rtprefixlen_difft new_prefixt new_prefixlenRRRtnew_addrtcurrent((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRfs,    cCs|jdkr|S|dk rh||jkr=tdn|dkrXtdn|j|}n|j|}|dkrtd|j|fn|jt|jt|j|>@|fS(uThe supernet containing the current network. Args: prefixlen_diff: An integer, the amount the prefix length of the network should be decreased by. For example, given a /24 network and a prefixlen_diff of 3, a supernet with a /21 netmask is returned. Returns: An IPv4 network object. Raises: ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have a negative prefix length. OR If prefixlen_diff and new_prefix are both set or new_prefix is a larger number than the current prefix (larger number means a smaller network) iunew prefix must be shorteriu(cannot set prefixlen_diff and new_prefixu;current prefixlen is %d, cannot have a prefixlen_diff of %dN(RjR`R2RRRRlR(RRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR^s     cCs|jjo|jjS(uTest if the address is reserved for multicast use. Returns: A boolean, True if the address is a multicast address. See RFC 2373 2.7 for details. 
(Rlt is_multicastRd(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRAs cCsp|j|jkrtSt|drVt|drV|j|jkoU|j|jkStdt|dS(Nunetwork_addressubroadcast_addressu9Unable to test subnet containment with element of type %s(RiRthasattrRlRdRRR~(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRMscCsp|j|jkrtSt|drVt|drV|j|jkoU|j|jkStdt|dS(Nunetwork_addressubroadcast_addressu9Unable to test subnet containment with element of type %s(RiRRRlRdRRR~(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt supernet_of[scCs|jjo|jjS(uTest if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within one of the reserved IPv6 Network ranges. (Rlt is_reservedRd(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRis cCs|jjo|jjS(uTest if the address is reserved for link-local. Returns: A boolean, True if the address is reserved per RFC 4291. (Rlt is_link_localRd(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRus cCs|jjo|jjS(uTest if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv4-special-registry or iana-ipv6-special-registry. (Rlt is_privateRd(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs cCs|j S(uTest if this address is allocated for public networks. Returns: A boolean, True if the address is not reserved per iana-ipv4-special-registry or iana-ipv6-special-registry. (R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt is_globals cCs|jjo|jjS(uTest if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 2373 2.5.2. (Rltis_unspecifiedRd(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs cCs|jjo|jjS(uTest if the address is a loopback address. Returns: A boolean, True if the address is a loopback address as defined in RFC 2373 2.5.3. 
(Rlt is_loopbackRd(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs N(&R)R*R-RRRRRRR$R RRRRRdRRRRRRRRRRtR`RfR^RRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRssD          K 0 5)       Rc BseZdZdZdZdedZedZeddddd d d d d g Z eZ iZ dZ e dZe dZe dZe dZdZdZedZedZRS(uyBase IPv4 object. The following methods are used by IPv4 objects in both single IP addresses and networks. iiiu 0123456789iiiiiiiiicCs t|S(N(R?(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRyscCs||jkrt|tr'|}n6y|j|}Wn tk r\|j|}nXt|j|}||f|j| 255) not permitted(R2RRRAR(Rt octet_strRt octet_int((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs cCs#djdt|ddDS(uTurns a 32-bit integer into dotted decimal notation. Args: ip_int: An integer, the IP address. Returns: The IP address as a string in dotted decimal notation. u.css@|]6}tt|tr1tjd|dn|VqdS(s!BiN(R?RR1RR(t.0R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pys -siubig(tjoinR(RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR"s cCs|jd}y5gtt|D]}||jkr"|^q"}Wntk rXtSXt|t|krutS|d|dkrtStS(uTest if the IP string is a hostmask (rather than a netmask). Args: ip_str: A string, the potential hostmask. Returns: A boolean, True if the IP string is a hostmask. u.ii(R@RRt_valid_mask_octetsR2RRAtTrue(RRRORtparts((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt _is_hostmask2s 5 cCs3t|jdddd}dj|dS(uReturn the reverse DNS pointer name for the IPv4 address. This implements the method described in RFC1035 3.5. 
u.Niu .in-addr.arpa(R?R@R(Rtreverse_octets((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR|Gs"cCs|jS(N(RT(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt max_prefixlenPscCs|jS(N(Ri(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRSTs((R)R*R-R+Rit IPV4LENGTHRUt frozensetRRRTRRyRRRRRRR|RRRS(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs" ' %  R/cBseZdZd ZdZedZedZedZedZ edZ ed Z ed Z ed Z RS( u/Represent and manipulate single IPv4 Addresses.u_ipu __weakref__cCst|tr)|j|||_dSt|trj|j|dt|}t|d|_dSt|}d|krt d|n|j ||_dS(u Args: address: A string or integer representing the IP Additionally, an integer can be passed, so IPv4Address('192.0.2.1') == IPv4Address(3221225985). or, more generally IPv4Address(int(IPv4Address('192.0.2.1'))) == IPv4Address('192.0.2.1') Raises: AddressValueError: If ipaddress isn't a valid IPv4 address. Niubigu/uUnexpected '/' in %r( RRRRFR1RRR R?R,R(RR3tbvstaddr_str((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR_s     cCs t|jS(u*The binary representation of this address.(R=RF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pytpackedscCs||jjkS(uTest if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within the reserved IPv4 Network range. (t _constantst_reserved_network(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs cs tfdjjDS(uTest if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv4-special-registry. c3s|]}|kVqdS(N((RRZ(R(s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pys s(tanyRt_private_networks(R((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs cCs||jjko|j S(N(Rt_public_networkR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCs||jjkS(uTest if the address is reserved for multicast use. Returns: A boolean, True if the address is multicast. See RFC 3171 for details. 
(Rt_multicast_network(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs cCs||jjkS(uTest if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 5735 3. (Rt_unspecified_address(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs cCs||jjkS(uTest if the address is a loopback address. Returns: A boolean, True if the address is a loopback per RFC 3330. (Rt_loopback_network(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCs||jjkS(uTest if the address is reserved for link-local. Returns: A boolean, True if the address is link-local per RFC 3927. (Rt_linklocal_network(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs(u_ipu __weakref__(R)R*R-R+RRRRRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR/Ys $     R9cBszeZdZdZdZdZdZejZe dZ e dZ e dZ e dZ RS( cCs;t|ttfrGtj||t|j|_|j|_ dSt|t rtj||dt |dkrt |d|_ n |j|_ t|dt |_|jj|_|jj|_dSt|}tj||dt|dt |_|jj |_ |jj|_|jj|_dS(NiiR7(RR1RR/RR5RFRRTRjttupleRARRRRRC(RR3RB((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs(   cCs d|j|j|jjfS(Nu%s/%d(RRFRR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCsVtj||}| s%|tkr)|Sy|j|jkSWntk rQtSXdS(N(R/R R!RRkR(RRt address_equal((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR s cCsOtj||}|tkr"tSy|j|jkSWntk rJtSXdS(N(R/R$R!RRkR(RRt address_less((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR$s  cCs|j|jAt|jjAS(N(RFRjRRRl(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCs t|jS(N(R/RF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRK scCsd|j|j|jfS(Nu%s/%s(RRFRj(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCsd|j|j|jfS(Nu%s/%s(RRFR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCsd|j|j|jfS(Nu%s/%s(RRFR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs(R)R*RRR R$RRxRRRKRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR9s    
R5cBs/eZdZeZedZedZRS(ueThis class represents and manipulates 32-bit IPv4 network + addresses.. Attributes: [examples for IPv4Network('192.0.2.0/27')] .network_address: IPv4Address('192.0.2.0') .hostmask: IPv4Address('0.0.0.31') .broadcast_address: IPv4Address('192.0.2.32') .netmask: IPv4Address('255.255.255.224') .prefixlen: 27 cCstj||t|ttfrVt||_|j|j\|_ |_ dSt|t rt |dkr|d}n |j}t|d|_|j|\|_ |_ t |j}|t |j @|kr|rtd|qt|t |j @|_ndSt|}t|j|d|_t |dkrf|d}n |j}|j|\|_ |_ |rtt |jt |j @|jkrtd|qntt |jt |j @|_|j |jdkr|j|_ndS(uInstantiate a new IPv4 network object. Args: address: A string or integer representing the IP [& network]. '192.0.2.0/24' '192.0.2.0/255.255.255.0' '192.0.0.2/0.0.0.255' are all functionally the same in IPv4. Similarly, '192.0.2.1' '192.0.2.1/255.255.255.255' '192.0.2.1/32' are also functionally equivalent. That is to say, failing to provide a subnetmask will create an object with a mask of /32. If the mask (portion after the / in the argument) is given in dotted quad form, it is treated as a netmask if it starts with a non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it starts with a zero field (e.g. 0.255.255.255 == /8), with the single exception of an all-zero mask which is treated as a netmask == /0. If no mask is given, a default of /32 is used. Additionally, an integer can be passed, so IPv4Network('192.0.2.1') == IPv4Network(3221225985) or, more generally IPv4Interface(int(IPv4Interface('192.0.2.1'))) == IPv4Interface('192.0.2.1') Raises: AddressValueError: If ipaddress isn't a valid IPv4 address. NetmaskValueError: If the netmask isn't valid for an IPv4 address. ValueError: If strict is True and a network address is not supplied. Niiu%s has host bits seti(RsRRRR1R/RlRRTRRjRRARR2RCRRR(RR3R7RRRB((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR0sB%      cCs3|jtdko'|jtdk o2|j S(uTest if this address is allocated for public networks. Returns: A boolean, True if the address is not reserved per iana-ipv4-special-registry. 
u 100.64.0.0/10(RlR5RdR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs ( R)R*R-R/RRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR5!s  Ut_IPv4ConstantscBseZedZedZedZedZedededededededed ed ed ed ed ededgZedZe dZ RS(u169.254.0.0/16u 127.0.0.0/8u 224.0.0.0/4u 100.64.0.0/10u 0.0.0.0/8u 10.0.0.0/8u 172.16.0.0/12u 192.0.0.0/29u192.0.0.170/31u 192.0.2.0/24u192.168.0.0/16u 198.18.0.0/15u198.51.100.0/24u203.0.113.0/24u 240.0.0.0/4u255.255.255.255/32u0.0.0.0( R)R*R5RRRRRRR/R(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs(                  t_BaseV6cBseZdZdZdZdedZdZedZ eZ iZ e dZ e dZe dZe d Ze dd Zd Zd Zed ZedZRS(uyBase IPv6 object. The following methods are used by IPv6 objects in both single IP addresses and networks. iiiiu0123456789ABCDEFabcdefcCsl||jkrat|tr'|}n|j|}t|j|}||f|j|} || sC|dk rxd |}t|n| }qCqCW|dk r]|} t||d } |ds| d 8} | rd}t||qn|ds| d 8} | rd}t||qn|j| | } | d krd}t||jd |fqnt||jkrd}t||j|fn|dsd}t||n|dsd}t||nt|} d} d} yd} x5t | D]'} | d K} | |j || O} qW| d | K} x9t | dD]'} | d K} | |j || O} qRW| SWn)t k r}td||fnXdS(uTurn an IPv6 ip_str into an integer. Args: ip_str: A string, the IPv6 ip_str. Returns: An int, the IPv6 address Raises: AddressValueError: if ip_str isn't a valid IPv6 Address. uAddress cannot be emptyu:iu At least %d parts expected in %ru.iu%s in %ru%xiiiu!At most %d colons permitted in %ru At most one '::' permitted in %riu0Leading ':' only permitted as part of '::' in %ru1Trailing ':' only permitted as part of '::' in %ru/Expected at most %d other parts with '::' in %ru,Exactly %d parts expected without '::' in %rN( R,R@RAR/R]RFRat _HEXTET_COUNTR`Rtranget _parse_hextetR2(RRRt _min_partsRtipv4_intRt _max_partst skip_indexRtparts_hitparts_lot parts_skippedR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs            #     cCs]|jj|s%td|nt|dkrPd}t||nt|dS(u&Convert an IPv6 hextet string into an integer. Args: hextet_str: A string, the number to parse. Returns: The hextet as an integer. 
Raises: ValueError: if the input isn't strictly a hex number from [0..FFFF]. uOnly hex digits permitted in %riu$At most 4 characters permitted in %ri(t _HEX_DIGITSRR2RAR(Rt hextet_strR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyREs c Csd}d}d}d}xot|D]a\}}|dkrz|d7}|dkr\|}n||kr|}|}qq%d}d}q%W|dkr||}|t|kr|dg7}ndg|||+|dkrdg|}qn|S(uCompresses a list of hextets. Compresses a list of strings, replacing the longest continuous sequence of "0" in the list with "" and adding empty strings at the beginning or at the end of the string such that subsequently calling ":".join(hextets) will produce the compressed version of the IPv6 address. Args: hextets: A list of strings, the hextets to compress. Returns: A list of strings. iiu0iu(t enumerateRA( Rthextetstbest_doublecolon_starttbest_doublecolon_lentdoublecolon_starttdoublecolon_lentindexthextettbest_doublecolon_end((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt_compress_hextets_s.         cCs|d krt|j}n||jkr<tdnd|}gtdddD]$}dt|||d!d^qY}|j|}dj|S( u,Turns a 128-bit integer into hexadecimal notation. Args: ip_int: An integer, the IP address. Returns: A string, the hexadecimal representation of the address. Raises: ValueError: The address is bigger than 128 bits of all ones. uIPv6 address is too largeu%032xii iu%xiu:N(R`RRFRUR2RRR(RRthex_strRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs  =cCst|tr!t|j}n-t|trBt|j}n t|}|j|}d|}gtdddD]}|||d!^qz}t|ttfrddj ||j fSdj |S(uExpand a shortened IPv6 address. Args: ip_str: A string, the IPv6 address. Returns: A string, the expanded IPv6 address. u%032xii iu%s/%du:( RR6R?RlR:RKRRRsRRj(RRRRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRys   0cCs3|jdddjdd}dj|dS(uReturn the reverse DNS pointer name for the IPv6 address. This implements the method described in RFC3596 2.5. 
Niu:uu.u .ip6.arpa(RztreplaceR(Rt reverse_chars((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR|s"cCs|jS(N(RT(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCs|jS(N(Ri(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRSs(N(R)R*R-R+Rit IPV6LENGTHRURRRRTRRRRRRR`RRyR|RRRS(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs$ i0  R0cBseZdZdZdZedZedZedZedZ edZ ed Z ed Z ed Z ed Zed ZedZedZRS(u/Represent and manipulate single IPv6 Addresses.u_ipu __weakref__cCst|tr)|j|||_dSt|trj|j|dt|}t|d|_dSt|}d|krt d|n|j ||_dS(uInstantiate a new IPv6 address object. Args: address: A string or integer representing the IP Additionally, an integer can be passed, so IPv6Address('2001:db8::') == IPv6Address(42540766411282592856903984951653826560) or, more generally IPv6Address(int(IPv6Address('2001:db8::'))) == IPv6Address('2001:db8::') Raises: AddressValueError: If address isn't a valid IPv6 address. Niubigu/uUnexpected '/' in %r( RRRRFR1RRR R?R,R(RR3RR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs     cCs t|jS(u*The binary representation of this address.(R>RF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCs||jjkS(uTest if the address is reserved for multicast use. Returns: A boolean, True if the address is a multicast address. See RFC 2373 2.7 for details. (RR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs cs tfdjjDS(uTest if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within one of the reserved IPv6 Network ranges. c3s|]}|kVqdS(N((RR(R(s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pys s(RRt_reserved_networks(R((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs cCs||jjkS(uTest if the address is reserved for link-local. Returns: A boolean, True if the address is reserved per RFC 4291. (RR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCs||jjkS(u`Test if the address is reserved for site-local. 
Note that the site-local address space has been deprecated by RFC 3879. Use is_private to test if this address is in the space of unique local addresses as defined by RFC 4193. Returns: A boolean, True if the address is reserved per RFC 3513 2.5.6. (Rt_sitelocal_network(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt is_site_local#s cs tfdjjDS(uTest if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv6-special-registry. c3s|]}|kVqdS(N((RRZ(R(s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pys :s(RRR(R((Rs9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR1s cCs|j S(uTest if this address is allocated for public networks. Returns: A boolean, true if the address is not reserved per iana-ipv6-special-registry. (R(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR<s cCs |jdkS(uTest if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 2373 2.5.2. i(RF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRGs cCs |jdkS(uTest if the address is a loopback address. Returns: A boolean, True if the address is a loopback address as defined in RFC 2373 2.5.3. i(RF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRRs cCs(|jd?dkrdSt|jd@S(uReturn the IPv4 mapped address. Returns: If the IPv6 address is a v4 mapped address, return the IPv4 mapped address. Return None otherwise. i iIN(RFR`R/(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt ipv4_mapped]s cCs@|jd?dkrdSt|jd?d@t|jd@fS(uTuple of embedded teredo IPs. Returns: Tuple of the (server, client) IPs or None if the address doesn't appear to be a teredo address (doesn't start with 2001::/32) i`i i@IN(RFR`R/(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pytteredojs cCs,|jd?dkrdSt|jd?d@S(uReturn the IPv4 6to4 embedded address. 
Returns: The IPv4 6to4-embedded address if present or None if the address doesn't appear to contain a 6to4 embedded address. ipi iPIN(RFR`R/(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt sixtofourys (u_ipu __weakref__(R)R*R-R+RRRRRRR RRRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR0s %        R:cBseZdZdZdZdZdZejZe dZ e dZ e dZ e dZ e d Ze d ZRS( cCs;t|ttfrGtj||t|j|_|j|_ dSt|t rtj||dt |dkrt |d|_ n |j|_ t|dt |_|jj|_|jj|_dSt|}tj||dt|dt |_|jj|_|jj |_ |jj|_dS(NiiR7(RR1RR0RR6RFRRTRjRRARRRRRC(RR3RB((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs(   cCs d|j|j|jjfS(Nu%s/%d(RRFRR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCsVtj||}| s%|tkr)|Sy|j|jkSWntk rQtSXdS(N(R0R R!RRkR(RRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR s cCsOtj||}|tkr"tSy|j|jkSWntk rJtSXdS(N(R0R$R!RRkR(RRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR$s  cCs|j|jAt|jjAS(N(RFRjRRRl(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCs t|jS(N(R0RF(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRKscCsd|j|j|jfS(Nu%s/%s(RRFRj(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCsd|j|j|jfS(Nu%s/%s(RRFR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCsd|j|j|jfS(Nu%s/%s(RRFR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCs|jdko|jjS(Ni(RFRR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRscCs|jdko|jjS(Ni(RFRR(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRs(R)R*RRR R$RRxRRRKRRRRR(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR:s    R6cBs8eZdZeZedZdZedZ RS(uvThis class represents and manipulates 128-bit IPv6 networks. 
Attributes: [examples for IPv6('2001:db8::1000/124')] .network_address: IPv6Address('2001:db8::1000') .hostmask: IPv6Address('::f') .broadcast_address: IPv6Address('2001:db8::100f') .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') .prefixlen: 124 cCstj||t|ttfrVt||_|j|j\|_ |_ dSt|t rt |dkr|d}n |j}|j|\|_ |_ t|d|_t |j}|t |j @|kr|rtd|qt|t |j @|_ndSt|}t|j|d|_t |dkrf|d}n |j}|j|\|_ |_ |rtt |jt |j @|jkrtd|qntt |jt |j @|_|j |jdkr|j|_ndS(uInstantiate a new IPv6 Network object. Args: address: A string or integer representing the IPv6 network or the IP and prefix/netmask. '2001:db8::/128' '2001:db8:0000:0000:0000:0000:0000:0000/128' '2001:db8::' are all functionally the same in IPv6. That is to say, failing to provide a subnetmask will create an object with a mask of /128. Additionally, an integer can be passed, so IPv6Network('2001:db8::') == IPv6Network(42540766411282592856903984951653826560) or, more generally IPv6Network(int(IPv6Network('2001:db8::'))) == IPv6Network('2001:db8::') strict: A boolean. If true, ensure that we have been passed A true network address, eg, 2001:db8::1000/124 and not an IP address on a network, eg, 2001:db8::1/124. Raises: AddressValueError: If address isn't a valid IPv6 address. NetmaskValueError: If the netmask isn't valid for an IPv6 address. ValueError: If strict was True and a network address was not supplied. Niiu%s has host bits seti(RsRRR1RR0RlRRTRRjRRARR2RCRRR(RR3R7RRRB((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRsB       ccsRt|j}t|j}x-t|d|dD]}|j|Vq6WdS(uGenerate Iterator over usable hosts in a network. This is like __iter__ except it doesn't return the Subnet-Router anycast address. iN(RRlRdRR(RRRR((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR< scCs|jjo|jjS(u`Test if the address is reserved for site-local. Note that the site-local address space has been deprecated by RFC 3879. Use is_private to test if this address is in the space of unique local addresses as defined by RFC 4193. 
Returns: A boolean, True if the address is reserved per RFC 3513 2.5.6. (RlR Rd(R((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR H s ( R)R*R-R0RRRRRR (((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyR6s   O t_IPv6ConstantscBseZedZedZedededededededed ed edg Zed ed ed ededededededededededededgZedZRS(u fe80::/10uff00::/8u::1/128u::/128u ::ffff:0:0/96u100::/64u 2001::/23u 2001:2::/48u 2001:db8::/32u 2001:10::/28ufc00::/7u::/8u100::/8u200::/7u400::/6u800::/5u1000::/4u4000::/3u6000::/3u8000::/3uA000::/3uC000::/3uE000::/4uF000::/5uF800::/6uFE00::/9u fec0::/10(R)R*R6RRRR R (((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyRX s*           (9R-t __future__RRRt __version__RRtlongt NameErrortunicodeR?tstrR1RRt from_bytesR RkRRRRtobjectRRR R2R,R.R4RR8R;R=R>RCRLRPR[RhRrRwRxRQRsRR/R9R5RRRR0R:R6R(((s9/usr/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyt sx              ) $ $ #      7 1 6 =vRr V{!PK.e[j[0&&certifi/core.pynu[#!/usr/bin/env python # -*- coding: utf-8 -*- """ certifi.py ~~~~~~~~~~ This module returns the installation location of cacert.pem. """ import os import warnings class DeprecatedBundleWarning(DeprecationWarning): """ The weak security bundle is being deprecated. Please bother your service provider to get them to stop using cross-signed roots. """ def where(): return '/etc/pki/tls/certs/ca-bundle.crt' def old_where(): warnings.warn( "The weak security bundle has been removed. certifi.old_where() is now an alias " "of certifi.where(). Please update your code to use certifi.where() instead. 
" "certifi.old_where() will be removed in 2018.", DeprecatedBundleWarning ) return where() if __name__ == '__main__': print(where()) PK.e[+))certifi/__main__.pynu[from certifi import where print(where()) PK.e[|certifi/__main__.pycnu[ abc@sddlmZeGHdS(i(twhereN(tcertifiR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/certifi/__main__.pytsPK.e[$ʴ  certifi/__init__.pycnu[ abc@s ddlmZmZdZdS(i(twheret old_wheres 2018.01.18N(tcoreRRt __version__(((s@/usr/lib/python2.7/site-packages/pip/_vendor/certifi/__init__.pytsPK.e[|certifi/__main__.pyonu[ abc@sddlmZeGHdS(i(twhereN(tcertifiR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/certifi/__main__.pytsPK.e[8RLLcertifi/core.pycnu[ abc@sadZddlZddlZdefdYZdZdZedkr]eGHndS(sU certifi.py ~~~~~~~~~~ This module returns the installation location of cacert.pem. iNtDeprecatedBundleWarningcBseZdZRS(s The weak security bundle is being deprecated. Please bother your service provider to get them to stop using cross-signed roots. (t__name__t __module__t__doc__(((s</usr/lib/python2.7/site-packages/pip/_vendor/certifi/core.pyRscCsdS(Ns /etc/pki/tls/certs/ca-bundle.crt((((s</usr/lib/python2.7/site-packages/pip/_vendor/certifi/core.pytwherescCstjdttS(NsThe weak security bundle has been removed. certifi.old_where() is now an alias of certifi.where(). Please update your code to use certifi.where() instead. certifi.old_where() will be removed in 2018.(twarningstwarnRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/certifi/core.pyt old_wherest__main__(RtosRtDeprecationWarningRRRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/certifi/core.pyt s    PK.e[$ʴ  certifi/__init__.pyonu[ abc@s ddlmZmZdZdS(i(twheret old_wheres 2018.01.18N(tcoreRRt __version__(((s@/usr/lib/python2.7/site-packages/pip/_vendor/certifi/__init__.pytsPK.e[8RLLcertifi/core.pyonu[ abc@sadZddlZddlZdefdYZdZdZedkr]eGHndS(sU certifi.py ~~~~~~~~~~ This module returns the installation location of cacert.pem. 
iNtDeprecatedBundleWarningcBseZdZRS(s The weak security bundle is being deprecated. Please bother your service provider to get them to stop using cross-signed roots. (t__name__t __module__t__doc__(((s</usr/lib/python2.7/site-packages/pip/_vendor/certifi/core.pyRscCsdS(Ns /etc/pki/tls/certs/ca-bundle.crt((((s</usr/lib/python2.7/site-packages/pip/_vendor/certifi/core.pytwherescCstjdttS(NsThe weak security bundle has been removed. certifi.old_where() is now an alias of certifi.where(). Please update your code to use certifi.where() instead. certifi.old_where() will be removed in 2018.(twarningstwarnRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/certifi/core.pyt old_wherest__main__(RtosRtDeprecationWarningRRRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/certifi/core.pyt s    PK.e[pG??certifi/__init__.pynu[from .core import where, old_where __version__ = "2018.01.18" PK.e[Ħ0909 ipaddress.pynu[# Copyright 2007 Google Inc. # Licensed to PSF under a Contributor Agreement. """A fast, lightweight IPv4/IPv6 manipulation library in Python. This library is used to create/poke/manipulate IPv4 and IPv6 addresses and networks. 
""" from __future__ import unicode_literals import itertools import struct __version__ = '1.0.17' # Compatibility functions _compat_int_types = (int,) try: _compat_int_types = (int, long) except NameError: pass try: _compat_str = unicode except NameError: _compat_str = str assert bytes != str if b'\0'[0] == 0: # Python 3 semantics def _compat_bytes_to_byte_vals(byt): return byt else: def _compat_bytes_to_byte_vals(byt): return [struct.unpack(b'!B', b)[0] for b in byt] try: _compat_int_from_byte_vals = int.from_bytes except AttributeError: def _compat_int_from_byte_vals(bytvals, endianess): assert endianess == 'big' res = 0 for bv in bytvals: assert isinstance(bv, _compat_int_types) res = (res << 8) + bv return res def _compat_to_bytes(intval, length, endianess): assert isinstance(intval, _compat_int_types) assert endianess == 'big' if length == 4: if intval < 0 or intval >= 2 ** 32: raise struct.error("integer out of range for 'I' format code") return struct.pack(b'!I', intval) elif length == 16: if intval < 0 or intval >= 2 ** 128: raise struct.error("integer out of range for 'QQ' format code") return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff) else: raise NotImplementedError() if hasattr(int, 'bit_length'): # Not int.bit_length , since that won't work in 2.7 where long exists def _compat_bit_length(i): return i.bit_length() else: def _compat_bit_length(i): for res in itertools.count(): if i >> res == 0: return res def _compat_range(start, end, step=1): assert step > 0 i = start while i < end: yield i i += step class _TotalOrderingMixin(object): __slots__ = () # Helper that derives the other comparison operations from # __lt__ and __eq__ # We avoid functools.total_ordering because it doesn't handle # NotImplemented correctly yet (http://bugs.python.org/issue10042) def __eq__(self, other): raise NotImplementedError def __ne__(self, other): equal = self.__eq__(other) if equal is NotImplemented: return NotImplemented return not equal def 
__lt__(self, other): raise NotImplementedError def __le__(self, other): less = self.__lt__(other) if less is NotImplemented or not less: return self.__eq__(other) return less def __gt__(self, other): less = self.__lt__(other) if less is NotImplemented: return NotImplemented equal = self.__eq__(other) if equal is NotImplemented: return NotImplemented return not (less or equal) def __ge__(self, other): less = self.__lt__(other) if less is NotImplemented: return NotImplemented return not less IPV4LENGTH = 32 IPV6LENGTH = 128 class AddressValueError(ValueError): """A Value Error related to the address.""" class NetmaskValueError(ValueError): """A Value Error related to the netmask.""" def ip_address(address): """Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP address. Either IPv4 or IPv6 addresses may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Address or IPv6Address object. Raises: ValueError: if the *address* passed isn't either a v4 or a v6 address """ try: return IPv4Address(address) except (AddressValueError, NetmaskValueError): pass try: return IPv6Address(address) except (AddressValueError, NetmaskValueError): pass if isinstance(address, bytes): raise AddressValueError( '%r does not appear to be an IPv4 or IPv6 address. ' 'Did you pass in a bytes (str in Python 2) instead of' ' a unicode object?' % address) raise ValueError('%r does not appear to be an IPv4 or IPv6 address' % address) def ip_network(address, strict=True): """Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP network. Either IPv4 or IPv6 networks may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Network or IPv6Network object. Raises: ValueError: if the string passed isn't either a v4 or a v6 address. Or if the network has host bits set. 
""" try: return IPv4Network(address, strict) except (AddressValueError, NetmaskValueError): pass try: return IPv6Network(address, strict) except (AddressValueError, NetmaskValueError): pass if isinstance(address, bytes): raise AddressValueError( '%r does not appear to be an IPv4 or IPv6 network. ' 'Did you pass in a bytes (str in Python 2) instead of' ' a unicode object?' % address) raise ValueError('%r does not appear to be an IPv4 or IPv6 network' % address) def ip_interface(address): """Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP address. Either IPv4 or IPv6 addresses may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Interface or IPv6Interface object. Raises: ValueError: if the string passed isn't either a v4 or a v6 address. Notes: The IPv?Interface classes describe an Address on a particular Network, so they're basically a combination of both the Address and Network classes. """ try: return IPv4Interface(address) except (AddressValueError, NetmaskValueError): pass try: return IPv6Interface(address) except (AddressValueError, NetmaskValueError): pass raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' % address) def v4_int_to_packed(address): """Represent an address as 4 packed bytes in network (big-endian) order. Args: address: An integer representation of an IPv4 IP address. Returns: The integer address packed as 4 bytes in network (big-endian) order. Raises: ValueError: If the integer is negative or too large to be an IPv4 IP address. """ try: return _compat_to_bytes(address, 4, 'big') except (struct.error, OverflowError): raise ValueError("Address negative or too large for IPv4") def v6_int_to_packed(address): """Represent an address as 16 packed bytes in network (big-endian) order. Args: address: An integer representation of an IPv6 IP address. Returns: The integer address packed as 16 bytes in network (big-endian) order. 
""" try: return _compat_to_bytes(address, 16, 'big') except (struct.error, OverflowError): raise ValueError("Address negative or too large for IPv6") def _split_optional_netmask(address): """Helper to split the netmask and raise AddressValueError if needed""" addr = _compat_str(address).split('/') if len(addr) > 2: raise AddressValueError("Only one '/' permitted in %r" % address) return addr def _find_address_range(addresses): """Find a sequence of sorted deduplicated IPv#Address. Args: addresses: a list of IPv#Address objects. Yields: A tuple containing the first and last IP addresses in the sequence. """ it = iter(addresses) first = last = next(it) for ip in it: if ip._ip != last._ip + 1: yield first, last first = ip last = ip yield first, last def _count_righthand_zero_bits(number, bits): """Count the number of zero bits on the right hand side. Args: number: an integer. bits: maximum number of bits to count. Returns: The number of zero bits on the right hand side of the number. """ if number == 0: return bits return min(bits, _compat_bit_length(~number & (number - 1))) def summarize_address_range(first, last): """Summarize a network range given the first and last IP addresses. Example: >>> list(summarize_address_range(IPv4Address('192.0.2.0'), ... IPv4Address('192.0.2.130'))) ... #doctest: +NORMALIZE_WHITESPACE [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), IPv4Network('192.0.2.130/32')] Args: first: the first IPv4Address or IPv6Address in the range. last: the last IPv4Address or IPv6Address in the range. Returns: An iterator of the summarized IPv(4|6) network objects. Raise: TypeError: If the first and last objects are not IP addresses. If the first and last objects are not the same version. ValueError: If the last object is not greater than the first. If the version of the first address is not 4 or 6. 
""" if (not (isinstance(first, _BaseAddress) and isinstance(last, _BaseAddress))): raise TypeError('first and last must be IP addresses, not networks') if first.version != last.version: raise TypeError("%s and %s are not of the same version" % ( first, last)) if first > last: raise ValueError('last IP address must be greater than first') if first.version == 4: ip = IPv4Network elif first.version == 6: ip = IPv6Network else: raise ValueError('unknown IP version') ip_bits = first._max_prefixlen first_int = first._ip last_int = last._ip while first_int <= last_int: nbits = min(_count_righthand_zero_bits(first_int, ip_bits), _compat_bit_length(last_int - first_int + 1) - 1) net = ip((first_int, ip_bits - nbits)) yield net first_int += 1 << nbits if first_int - 1 == ip._ALL_ONES: break def _collapse_addresses_internal(addresses): """Loops through the addresses, collapsing concurrent netblocks. Example: ip1 = IPv4Network('192.0.2.0/26') ip2 = IPv4Network('192.0.2.64/26') ip3 = IPv4Network('192.0.2.128/26') ip4 = IPv4Network('192.0.2.192/26') _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> [IPv4Network('192.0.2.0/24')] This shouldn't be called directly; it is called via collapse_addresses([]). Args: addresses: A list of IPv4Network's or IPv6Network's Returns: A list of IPv4Network's or IPv6Network's depending on what we were passed. """ # First merge to_merge = list(addresses) subnets = {} while to_merge: net = to_merge.pop() supernet = net.supernet() existing = subnets.get(supernet) if existing is None: subnets[supernet] = net elif existing != net: # Merge consecutive subnets del subnets[supernet] to_merge.append(supernet) # Then iterate over resulting networks, skipping subsumed subnets last = None for net in sorted(subnets.values()): if last is not None: # Since they are sorted, # last.network_address <= net.network_address is a given. 
if last.broadcast_address >= net.broadcast_address: continue yield net last = net def collapse_addresses(addresses): """Collapse a list of IP objects. Example: collapse_addresses([IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]) -> [IPv4Network('192.0.2.0/24')] Args: addresses: An iterator of IPv4Network or IPv6Network objects. Returns: An iterator of the collapsed IPv(4|6)Network objects. Raises: TypeError: If passed a list of mixed version objects. """ addrs = [] ips = [] nets = [] # split IP addresses and networks for ip in addresses: if isinstance(ip, _BaseAddress): if ips and ips[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, ips[-1])) ips.append(ip) elif ip._prefixlen == ip._max_prefixlen: if ips and ips[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, ips[-1])) try: ips.append(ip.ip) except AttributeError: ips.append(ip.network_address) else: if nets and nets[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, nets[-1])) nets.append(ip) # sort and dedup ips = sorted(set(ips)) # find consecutive address ranges in the sorted sequence and summarize them if ips: for first, last in _find_address_range(ips): addrs.extend(summarize_address_range(first, last)) return _collapse_addresses_internal(addrs + nets) def get_mixed_type_key(obj): """Return a key suitable for sorting between networks and addresses. Address and Network objects are not sortable by default; they're fundamentally different so the expression IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') doesn't make any sense. There are some times however, where you may wish to have ipaddress sort these for you anyway. If you need to do this, you can use this function as the key= argument to sorted(). Args: obj: either a Network or Address object. Returns: appropriate key. 
""" if isinstance(obj, _BaseNetwork): return obj._get_networks_key() elif isinstance(obj, _BaseAddress): return obj._get_address_key() return NotImplemented class _IPAddressBase(_TotalOrderingMixin): """The mother class.""" __slots__ = () @property def exploded(self): """Return the longhand version of the IP address as a string.""" return self._explode_shorthand_ip_string() @property def compressed(self): """Return the shorthand version of the IP address as a string.""" return _compat_str(self) @property def reverse_pointer(self): """The name of the reverse DNS pointer for the IP address, e.g.: >>> ipaddress.ip_address("127.0.0.1").reverse_pointer '1.0.0.127.in-addr.arpa' >>> ipaddress.ip_address("2001:db8::1").reverse_pointer '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' """ return self._reverse_pointer() @property def version(self): msg = '%200s has no version specified' % (type(self),) raise NotImplementedError(msg) def _check_int_address(self, address): if address < 0: msg = "%d (< 0) is not permitted as an IPv%d address" raise AddressValueError(msg % (address, self._version)) if address > self._ALL_ONES: msg = "%d (>= 2**%d) is not permitted as an IPv%d address" raise AddressValueError(msg % (address, self._max_prefixlen, self._version)) def _check_packed_address(self, address, expected_len): address_len = len(address) if address_len != expected_len: msg = ( '%r (len %d != %d) is not permitted as an IPv%d address. ' 'Did you pass in a bytes (str in Python 2) instead of' ' a unicode object?' ) raise AddressValueError(msg % (address, address_len, expected_len, self._version)) @classmethod def _ip_int_from_prefix(cls, prefixlen): """Turn the prefix length into a bitwise netmask Args: prefixlen: An integer, the prefix length. Returns: An integer. """ return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) @classmethod def _prefix_from_ip_int(cls, ip_int): """Return prefix length from the bitwise netmask. 
Args: ip_int: An integer, the netmask in expanded bitwise format Returns: An integer, the prefix length. Raises: ValueError: If the input intermingles zeroes & ones """ trailing_zeroes = _count_righthand_zero_bits(ip_int, cls._max_prefixlen) prefixlen = cls._max_prefixlen - trailing_zeroes leading_ones = ip_int >> trailing_zeroes all_ones = (1 << prefixlen) - 1 if leading_ones != all_ones: byteslen = cls._max_prefixlen // 8 details = _compat_to_bytes(ip_int, byteslen, 'big') msg = 'Netmask pattern %r mixes zeroes & ones' raise ValueError(msg % details) return prefixlen @classmethod def _report_invalid_netmask(cls, netmask_str): msg = '%r is not a valid netmask' % netmask_str raise NetmaskValueError(msg) @classmethod def _prefix_from_prefix_string(cls, prefixlen_str): """Return prefix length from a numeric string Args: prefixlen_str: The string to be converted Returns: An integer, the prefix length. Raises: NetmaskValueError: If the input is not a valid netmask """ # int allows a leading +/- as well as surrounding whitespace, # so we ensure that isn't the case if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str): cls._report_invalid_netmask(prefixlen_str) try: prefixlen = int(prefixlen_str) except ValueError: cls._report_invalid_netmask(prefixlen_str) if not (0 <= prefixlen <= cls._max_prefixlen): cls._report_invalid_netmask(prefixlen_str) return prefixlen @classmethod def _prefix_from_ip_string(cls, ip_str): """Turn a netmask/hostmask string into a prefix length Args: ip_str: The netmask/hostmask to be converted Returns: An integer, the prefix length. Raises: NetmaskValueError: If the input is not a valid netmask/hostmask """ # Parse the netmask/hostmask like an IP address. try: ip_int = cls._ip_int_from_string(ip_str) except AddressValueError: cls._report_invalid_netmask(ip_str) # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). # Note that the two ambiguous cases (all-ones and all-zeroes) are # treated as netmasks. 
try: return cls._prefix_from_ip_int(ip_int) except ValueError: pass # Invert the bits, and try matching a /0+1+/ hostmask instead. ip_int ^= cls._ALL_ONES try: return cls._prefix_from_ip_int(ip_int) except ValueError: cls._report_invalid_netmask(ip_str) def __reduce__(self): return self.__class__, (_compat_str(self),) class _BaseAddress(_IPAddressBase): """A generic IP object. This IP class contains the version independent methods which are used by single IP addresses. """ __slots__ = () def __int__(self): return self._ip def __eq__(self, other): try: return (self._ip == other._ip and self._version == other._version) except AttributeError: return NotImplemented def __lt__(self, other): if not isinstance(other, _IPAddressBase): return NotImplemented if not isinstance(other, _BaseAddress): raise TypeError('%s and %s are not of the same type' % ( self, other)) if self._version != other._version: raise TypeError('%s and %s are not of the same version' % ( self, other)) if self._ip != other._ip: return self._ip < other._ip return False # Shorthand for Integer addition and subtraction. This is not # meant to ever support addition/subtraction of addresses. def __add__(self, other): if not isinstance(other, _compat_int_types): return NotImplemented return self.__class__(int(self) + other) def __sub__(self, other): if not isinstance(other, _compat_int_types): return NotImplemented return self.__class__(int(self) - other) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) def __str__(self): return _compat_str(self._string_from_ip_int(self._ip)) def __hash__(self): return hash(hex(int(self._ip))) def _get_address_key(self): return (self._version, self) def __reduce__(self): return self.__class__, (self._ip,) class _BaseNetwork(_IPAddressBase): """A generic IP network object. This IP class contains the version independent methods which are used by networks. 
""" def __init__(self, address): self._cache = {} def __repr__(self): return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) def __str__(self): return '%s/%d' % (self.network_address, self.prefixlen) def hosts(self): """Generate Iterator over usable hosts in a network. This is like __iter__ except it doesn't return the network or broadcast addresses. """ network = int(self.network_address) broadcast = int(self.broadcast_address) for x in _compat_range(network + 1, broadcast): yield self._address_class(x) def __iter__(self): network = int(self.network_address) broadcast = int(self.broadcast_address) for x in _compat_range(network, broadcast + 1): yield self._address_class(x) def __getitem__(self, n): network = int(self.network_address) broadcast = int(self.broadcast_address) if n >= 0: if network + n > broadcast: raise IndexError('address out of range') return self._address_class(network + n) else: n += 1 if broadcast + n < network: raise IndexError('address out of range') return self._address_class(broadcast + n) def __lt__(self, other): if not isinstance(other, _IPAddressBase): return NotImplemented if not isinstance(other, _BaseNetwork): raise TypeError('%s and %s are not of the same type' % ( self, other)) if self._version != other._version: raise TypeError('%s and %s are not of the same version' % ( self, other)) if self.network_address != other.network_address: return self.network_address < other.network_address if self.netmask != other.netmask: return self.netmask < other.netmask return False def __eq__(self, other): try: return (self._version == other._version and self.network_address == other.network_address and int(self.netmask) == int(other.netmask)) except AttributeError: return NotImplemented def __hash__(self): return hash(int(self.network_address) ^ int(self.netmask)) def __contains__(self, other): # always false if one is v4 and the other is v6. if self._version != other._version: return False # dealing with another network. 
if isinstance(other, _BaseNetwork): return False # dealing with another address else: # address return (int(self.network_address) <= int(other._ip) <= int(self.broadcast_address)) def overlaps(self, other): """Tell if self is partly contained in other.""" return self.network_address in other or ( self.broadcast_address in other or ( other.network_address in self or ( other.broadcast_address in self))) @property def broadcast_address(self): x = self._cache.get('broadcast_address') if x is None: x = self._address_class(int(self.network_address) | int(self.hostmask)) self._cache['broadcast_address'] = x return x @property def hostmask(self): x = self._cache.get('hostmask') if x is None: x = self._address_class(int(self.netmask) ^ self._ALL_ONES) self._cache['hostmask'] = x return x @property def with_prefixlen(self): return '%s/%d' % (self.network_address, self._prefixlen) @property def with_netmask(self): return '%s/%s' % (self.network_address, self.netmask) @property def with_hostmask(self): return '%s/%s' % (self.network_address, self.hostmask) @property def num_addresses(self): """Number of hosts in the current subnet.""" return int(self.broadcast_address) - int(self.network_address) + 1 @property def _address_class(self): # Returning bare address objects (rather than interfaces) allows for # more consistent behaviour across the network address, broadcast # address and individual host addresses. msg = '%200s has no associated address class' % (type(self),) raise NotImplementedError(msg) @property def prefixlen(self): return self._prefixlen def address_exclude(self, other): """Remove an address from a larger block. 
For example: addr1 = ip_network('192.0.2.0/28') addr2 = ip_network('192.0.2.1/32') list(addr1.address_exclude(addr2)) = [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] or IPv6: addr1 = ip_network('2001:db8::1/32') addr2 = ip_network('2001:db8::1/128') list(addr1.address_exclude(addr2)) = [ip_network('2001:db8::1/128'), ip_network('2001:db8::2/127'), ip_network('2001:db8::4/126'), ip_network('2001:db8::8/125'), ... ip_network('2001:db8:8000::/33')] Args: other: An IPv4Network or IPv6Network object of the same type. Returns: An iterator of the IPv(4|6)Network objects which is self minus other. Raises: TypeError: If self and other are of differing address versions, or if other is not a network object. ValueError: If other is not completely contained by self. """ if not self._version == other._version: raise TypeError("%s and %s are not of the same version" % ( self, other)) if not isinstance(other, _BaseNetwork): raise TypeError("%s is not a network object" % other) if not other.subnet_of(self): raise ValueError('%s not contained in %s' % (other, self)) if other == self: return # Make sure we're comparing the network of other. other = other.__class__('%s/%s' % (other.network_address, other.prefixlen)) s1, s2 = self.subnets() while s1 != other and s2 != other: if other.subnet_of(s1): yield s2 s1, s2 = s1.subnets() elif other.subnet_of(s2): yield s1 s1, s2 = s2.subnets() else: # If we got here, there's a bug somewhere. raise AssertionError('Error performing exclusion: ' 's1: %s s2: %s other: %s' % (s1, s2, other)) if s1 == other: yield s2 elif s2 == other: yield s1 else: # If we got here, there's a bug somewhere. raise AssertionError('Error performing exclusion: ' 's1: %s s2: %s other: %s' % (s1, s2, other)) def compare_networks(self, other): """Compare two IP objects. This is only concerned about the comparison of the integer representation of the network addresses. 
This means that the host bits aren't considered at all in this method. If you want to compare host bits, you can easily enough do a 'HostA._ip < HostB._ip' Args: other: An IP object. Returns: If the IP versions of self and other are the same, returns: -1 if self < other: eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') IPv6Network('2001:db8::1000/124') < IPv6Network('2001:db8::2000/124') 0 if self == other eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') IPv6Network('2001:db8::1000/124') == IPv6Network('2001:db8::1000/124') 1 if self > other eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') IPv6Network('2001:db8::2000/124') > IPv6Network('2001:db8::1000/124') Raises: TypeError if the IP versions are different. """ # does this need to raise a ValueError? if self._version != other._version: raise TypeError('%s and %s are not of the same type' % ( self, other)) # self._version == other._version below here: if self.network_address < other.network_address: return -1 if self.network_address > other.network_address: return 1 # self.network_address == other.network_address below here: if self.netmask < other.netmask: return -1 if self.netmask > other.netmask: return 1 return 0 def _get_networks_key(self): """Network-only key function. Returns an object that identifies this address' network and netmask. This function is a suitable "key" argument for sorted() and list.sort(). """ return (self._version, self.network_address, self.netmask) def subnets(self, prefixlen_diff=1, new_prefix=None): """The subnets which join to make the current subnet. In the case that self contains only one IP (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 for IPv6), yield an iterator with just ourself. Args: prefixlen_diff: An integer, the amount the prefix length should be increased by. This should not be set if new_prefix is also set. new_prefix: The desired new prefix length. 
This must be a larger number (smaller prefix) than the existing prefix. This should not be set if prefixlen_diff is also set. Returns: An iterator of IPv(4|6) objects. Raises: ValueError: The prefixlen_diff is too small or too large. OR prefixlen_diff and new_prefix are both set or new_prefix is a smaller number than the current prefix (smaller number means a larger network) """ if self._prefixlen == self._max_prefixlen: yield self return if new_prefix is not None: if new_prefix < self._prefixlen: raise ValueError('new prefix must be longer') if prefixlen_diff != 1: raise ValueError('cannot set prefixlen_diff and new_prefix') prefixlen_diff = new_prefix - self._prefixlen if prefixlen_diff < 0: raise ValueError('prefix length diff must be > 0') new_prefixlen = self._prefixlen + prefixlen_diff if new_prefixlen > self._max_prefixlen: raise ValueError( 'prefix length diff %d is invalid for netblock %s' % ( new_prefixlen, self)) start = int(self.network_address) end = int(self.broadcast_address) + 1 step = (int(self.hostmask) + 1) >> prefixlen_diff for new_addr in _compat_range(start, end, step): current = self.__class__((new_addr, new_prefixlen)) yield current def supernet(self, prefixlen_diff=1, new_prefix=None): """The supernet containing the current network. Args: prefixlen_diff: An integer, the amount the prefix length of the network should be decreased by. For example, given a /24 network and a prefixlen_diff of 3, a supernet with a /21 netmask is returned. Returns: An IPv4 network object. Raises: ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have a negative prefix length. 
OR If prefixlen_diff and new_prefix are both set or new_prefix is a larger number than the current prefix (larger number means a smaller network) """ if self._prefixlen == 0: return self if new_prefix is not None: if new_prefix > self._prefixlen: raise ValueError('new prefix must be shorter') if prefixlen_diff != 1: raise ValueError('cannot set prefixlen_diff and new_prefix') prefixlen_diff = self._prefixlen - new_prefix new_prefixlen = self.prefixlen - prefixlen_diff if new_prefixlen < 0: raise ValueError( 'current prefixlen is %d, cannot have a prefixlen_diff of %d' % (self.prefixlen, prefixlen_diff)) return self.__class__(( int(self.network_address) & (int(self.netmask) << prefixlen_diff), new_prefixlen )) @property def is_multicast(self): """Test if the address is reserved for multicast use. Returns: A boolean, True if the address is a multicast address. See RFC 2373 2.7 for details. """ return (self.network_address.is_multicast and self.broadcast_address.is_multicast) def subnet_of(self, other): # always false if one is v4 and the other is v6. if self._version != other._version: return False # dealing with another network. if (hasattr(other, 'network_address') and hasattr(other, 'broadcast_address')): return (other.network_address <= self.network_address and other.broadcast_address >= self.broadcast_address) # dealing with another address else: raise TypeError('Unable to test subnet containment with element ' 'of type %s' % type(other)) def supernet_of(self, other): # always false if one is v4 and the other is v6. if self._version != other._version: return False # dealing with another network. 
if (hasattr(other, 'network_address') and hasattr(other, 'broadcast_address')): return (other.network_address >= self.network_address and other.broadcast_address <= self.broadcast_address) # dealing with another address else: raise TypeError('Unable to test subnet containment with element ' 'of type %s' % type(other)) @property def is_reserved(self): """Test if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within one of the reserved IPv6 Network ranges. """ return (self.network_address.is_reserved and self.broadcast_address.is_reserved) @property def is_link_local(self): """Test if the address is reserved for link-local. Returns: A boolean, True if the address is reserved per RFC 4291. """ return (self.network_address.is_link_local and self.broadcast_address.is_link_local) @property def is_private(self): """Test if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv4-special-registry or iana-ipv6-special-registry. """ return (self.network_address.is_private and self.broadcast_address.is_private) @property def is_global(self): """Test if this address is allocated for public networks. Returns: A boolean, True if the address is not reserved per iana-ipv4-special-registry or iana-ipv6-special-registry. """ return not self.is_private @property def is_unspecified(self): """Test if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 2373 2.5.2. """ return (self.network_address.is_unspecified and self.broadcast_address.is_unspecified) @property def is_loopback(self): """Test if the address is a loopback address. Returns: A boolean, True if the address is a loopback address as defined in RFC 2373 2.5.3. """ return (self.network_address.is_loopback and self.broadcast_address.is_loopback) class _BaseV4(object): """Base IPv4 object. The following methods are used by IPv4 objects in both single IP addresses and networks. 
""" __slots__ = () _version = 4 # Equivalent to 255.255.255.255 or 32 bits of 1's. _ALL_ONES = (2 ** IPV4LENGTH) - 1 _DECIMAL_DIGITS = frozenset('0123456789') # the valid octets for host and netmasks. only useful for IPv4. _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0]) _max_prefixlen = IPV4LENGTH # There are only a handful of valid v4 netmasks, so we cache them all # when constructed (see _make_netmask()). _netmask_cache = {} def _explode_shorthand_ip_string(self): return _compat_str(self) @classmethod def _make_netmask(cls, arg): """Make a (netmask, prefix_len) tuple from the given argument. Argument can be: - an integer (the prefix length) - a string representing the prefix length (e.g. "24") - a string representing the prefix netmask (e.g. "255.255.255.0") """ if arg not in cls._netmask_cache: if isinstance(arg, _compat_int_types): prefixlen = arg else: try: # Check for a netmask in prefix length form prefixlen = cls._prefix_from_prefix_string(arg) except NetmaskValueError: # Check for a netmask or hostmask in dotted-quad form. # This may raise NetmaskValueError. prefixlen = cls._prefix_from_ip_string(arg) netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) cls._netmask_cache[arg] = netmask, prefixlen return cls._netmask_cache[arg] @classmethod def _ip_int_from_string(cls, ip_str): """Turn the given IP string into an integer for comparison. Args: ip_str: A string, the IP ip_str. Returns: The IP ip_str as an integer. Raises: AddressValueError: if ip_str isn't a valid IPv4 Address. """ if not ip_str: raise AddressValueError('Address cannot be empty') octets = ip_str.split('.') if len(octets) != 4: raise AddressValueError("Expected 4 octets in %r" % ip_str) try: return _compat_int_from_byte_vals( map(cls._parse_octet, octets), 'big') except ValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str)) @classmethod def _parse_octet(cls, octet_str): """Convert a decimal octet into an integer. 
Args: octet_str: A string, the number to parse. Returns: The octet as an integer. Raises: ValueError: if the octet isn't strictly a decimal from [0..255]. """ if not octet_str: raise ValueError("Empty octet not permitted") # Whitelist the characters, since int() allows a lot of bizarre stuff. if not cls._DECIMAL_DIGITS.issuperset(octet_str): msg = "Only decimal digits permitted in %r" raise ValueError(msg % octet_str) # We do the length check second, since the invalid character error # is likely to be more informative for the user if len(octet_str) > 3: msg = "At most 3 characters permitted in %r" raise ValueError(msg % octet_str) # Convert to integer (we know digits are legal) octet_int = int(octet_str, 10) # Any octets that look like they *might* be written in octal, # and which don't look exactly the same in both octal and # decimal are rejected as ambiguous if octet_int > 7 and octet_str[0] == '0': msg = "Ambiguous (octal/decimal) value in %r not permitted" raise ValueError(msg % octet_str) if octet_int > 255: raise ValueError("Octet %d (> 255) not permitted" % octet_int) return octet_int @classmethod def _string_from_ip_int(cls, ip_int): """Turns a 32-bit integer into dotted decimal notation. Args: ip_int: An integer, the IP address. Returns: The IP address as a string in dotted decimal notation. """ return '.'.join(_compat_str(struct.unpack(b'!B', b)[0] if isinstance(b, bytes) else b) for b in _compat_to_bytes(ip_int, 4, 'big')) def _is_hostmask(self, ip_str): """Test if the IP string is a hostmask (rather than a netmask). Args: ip_str: A string, the potential hostmask. Returns: A boolean, True if the IP string is a hostmask. """ bits = ip_str.split('.') try: parts = [x for x in map(int, bits) if x in self._valid_mask_octets] except ValueError: return False if len(parts) != len(bits): return False if parts[0] < parts[-1]: return True return False def _reverse_pointer(self): """Return the reverse DNS pointer name for the IPv4 address. 
This implements the method described in RFC1035 3.5. """ reverse_octets = _compat_str(self).split('.')[::-1] return '.'.join(reverse_octets) + '.in-addr.arpa' @property def max_prefixlen(self): return self._max_prefixlen @property def version(self): return self._version class IPv4Address(_BaseV4, _BaseAddress): """Represent and manipulate single IPv4 Addresses.""" __slots__ = ('_ip', '__weakref__') def __init__(self, address): """ Args: address: A string or integer representing the IP Additionally, an integer can be passed, so IPv4Address('192.0.2.1') == IPv4Address(3221225985). or, more generally IPv4Address(int(IPv4Address('192.0.2.1'))) == IPv4Address('192.0.2.1') Raises: AddressValueError: If ipaddress isn't a valid IPv4 address. """ # Efficient constructor from integer. if isinstance(address, _compat_int_types): self._check_int_address(address) self._ip = address return # Constructing from a packed address if isinstance(address, bytes): self._check_packed_address(address, 4) bvs = _compat_bytes_to_byte_vals(address) self._ip = _compat_int_from_byte_vals(bvs, 'big') return # Assume input argument to be string or any object representation # which converts into a formatted IP string. addr_str = _compat_str(address) if '/' in addr_str: raise AddressValueError("Unexpected '/' in %r" % address) self._ip = self._ip_int_from_string(addr_str) @property def packed(self): """The binary representation of this address.""" return v4_int_to_packed(self._ip) @property def is_reserved(self): """Test if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within the reserved IPv4 Network range. """ return self in self._constants._reserved_network @property def is_private(self): """Test if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv4-special-registry. 
""" return any(self in net for net in self._constants._private_networks) @property def is_global(self): return ( self not in self._constants._public_network and not self.is_private) @property def is_multicast(self): """Test if the address is reserved for multicast use. Returns: A boolean, True if the address is multicast. See RFC 3171 for details. """ return self in self._constants._multicast_network @property def is_unspecified(self): """Test if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 5735 3. """ return self == self._constants._unspecified_address @property def is_loopback(self): """Test if the address is a loopback address. Returns: A boolean, True if the address is a loopback per RFC 3330. """ return self in self._constants._loopback_network @property def is_link_local(self): """Test if the address is reserved for link-local. Returns: A boolean, True if the address is link-local per RFC 3927. """ return self in self._constants._linklocal_network class IPv4Interface(IPv4Address): def __init__(self, address): if isinstance(address, (bytes, _compat_int_types)): IPv4Address.__init__(self, address) self.network = IPv4Network(self._ip) self._prefixlen = self._max_prefixlen return if isinstance(address, tuple): IPv4Address.__init__(self, address[0]) if len(address) > 1: self._prefixlen = int(address[1]) else: self._prefixlen = self._max_prefixlen self.network = IPv4Network(address, strict=False) self.netmask = self.network.netmask self.hostmask = self.network.hostmask return addr = _split_optional_netmask(address) IPv4Address.__init__(self, addr[0]) self.network = IPv4Network(address, strict=False) self._prefixlen = self.network._prefixlen self.netmask = self.network.netmask self.hostmask = self.network.hostmask def __str__(self): return '%s/%d' % (self._string_from_ip_int(self._ip), self.network.prefixlen) def __eq__(self, other): address_equal = IPv4Address.__eq__(self, other) if not address_equal or 
address_equal is NotImplemented: return address_equal try: return self.network == other.network except AttributeError: # An interface with an associated network is NOT the # same as an unassociated address. That's why the hash # takes the extra info into account. return False def __lt__(self, other): address_less = IPv4Address.__lt__(self, other) if address_less is NotImplemented: return NotImplemented try: return self.network < other.network except AttributeError: # We *do* allow addresses and interfaces to be sorted. The # unassociated address is considered less than all interfaces. return False def __hash__(self): return self._ip ^ self._prefixlen ^ int(self.network.network_address) __reduce__ = _IPAddressBase.__reduce__ @property def ip(self): return IPv4Address(self._ip) @property def with_prefixlen(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self._prefixlen) @property def with_netmask(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self.netmask) @property def with_hostmask(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self.hostmask) class IPv4Network(_BaseV4, _BaseNetwork): """This class represents and manipulates 32-bit IPv4 network + addresses.. Attributes: [examples for IPv4Network('192.0.2.0/27')] .network_address: IPv4Address('192.0.2.0') .hostmask: IPv4Address('0.0.0.31') .broadcast_address: IPv4Address('192.0.2.32') .netmask: IPv4Address('255.255.255.224') .prefixlen: 27 """ # Class to use when creating address objects _address_class = IPv4Address def __init__(self, address, strict=True): """Instantiate a new IPv4 network object. Args: address: A string or integer representing the IP [& network]. '192.0.2.0/24' '192.0.2.0/255.255.255.0' '192.0.0.2/0.0.0.255' are all functionally the same in IPv4. Similarly, '192.0.2.1' '192.0.2.1/255.255.255.255' '192.0.2.1/32' are also functionally equivalent. That is to say, failing to provide a subnetmask will create an object with a mask of /32. 
If the mask (portion after the / in the argument) is given in dotted quad form, it is treated as a netmask if it starts with a non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it starts with a zero field (e.g. 0.255.255.255 == /8), with the single exception of an all-zero mask which is treated as a netmask == /0. If no mask is given, a default of /32 is used. Additionally, an integer can be passed, so IPv4Network('192.0.2.1') == IPv4Network(3221225985) or, more generally IPv4Interface(int(IPv4Interface('192.0.2.1'))) == IPv4Interface('192.0.2.1') Raises: AddressValueError: If ipaddress isn't a valid IPv4 address. NetmaskValueError: If the netmask isn't valid for an IPv4 address. ValueError: If strict is True and a network address is not supplied. """ _BaseNetwork.__init__(self, address) # Constructing from a packed address or integer if isinstance(address, (_compat_int_types, bytes)): self.network_address = IPv4Address(address) self.netmask, self._prefixlen = self._make_netmask( self._max_prefixlen) # fixme: address/network test here. return if isinstance(address, tuple): if len(address) > 1: arg = address[1] else: # We weren't given an address[1] arg = self._max_prefixlen self.network_address = IPv4Address(address[0]) self.netmask, self._prefixlen = self._make_netmask(arg) packed = int(self.network_address) if packed & int(self.netmask) != packed: if strict: raise ValueError('%s has host bits set' % self) else: self.network_address = IPv4Address(packed & int(self.netmask)) return # Assume input argument to be string or any object representation # which converts into a formatted IP prefix string. 
addr = _split_optional_netmask(address) self.network_address = IPv4Address(self._ip_int_from_string(addr[0])) if len(addr) == 2: arg = addr[1] else: arg = self._max_prefixlen self.netmask, self._prefixlen = self._make_netmask(arg) if strict: if (IPv4Address(int(self.network_address) & int(self.netmask)) != self.network_address): raise ValueError('%s has host bits set' % self) self.network_address = IPv4Address(int(self.network_address) & int(self.netmask)) if self._prefixlen == (self._max_prefixlen - 1): self.hosts = self.__iter__ @property def is_global(self): """Test if this address is allocated for public networks. Returns: A boolean, True if the address is not reserved per iana-ipv4-special-registry. """ return (not (self.network_address in IPv4Network('100.64.0.0/10') and self.broadcast_address in IPv4Network('100.64.0.0/10')) and not self.is_private) class _IPv4Constants(object): _linklocal_network = IPv4Network('169.254.0.0/16') _loopback_network = IPv4Network('127.0.0.0/8') _multicast_network = IPv4Network('224.0.0.0/4') _public_network = IPv4Network('100.64.0.0/10') _private_networks = [ IPv4Network('0.0.0.0/8'), IPv4Network('10.0.0.0/8'), IPv4Network('127.0.0.0/8'), IPv4Network('169.254.0.0/16'), IPv4Network('172.16.0.0/12'), IPv4Network('192.0.0.0/29'), IPv4Network('192.0.0.170/31'), IPv4Network('192.0.2.0/24'), IPv4Network('192.168.0.0/16'), IPv4Network('198.18.0.0/15'), IPv4Network('198.51.100.0/24'), IPv4Network('203.0.113.0/24'), IPv4Network('240.0.0.0/4'), IPv4Network('255.255.255.255/32'), ] _reserved_network = IPv4Network('240.0.0.0/4') _unspecified_address = IPv4Address('0.0.0.0') IPv4Address._constants = _IPv4Constants class _BaseV6(object): """Base IPv6 object. The following methods are used by IPv6 objects in both single IP addresses and networks. 
""" __slots__ = () _version = 6 _ALL_ONES = (2 ** IPV6LENGTH) - 1 _HEXTET_COUNT = 8 _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef') _max_prefixlen = IPV6LENGTH # There are only a bunch of valid v6 netmasks, so we cache them all # when constructed (see _make_netmask()). _netmask_cache = {} @classmethod def _make_netmask(cls, arg): """Make a (netmask, prefix_len) tuple from the given argument. Argument can be: - an integer (the prefix length) - a string representing the prefix length (e.g. "24") - a string representing the prefix netmask (e.g. "255.255.255.0") """ if arg not in cls._netmask_cache: if isinstance(arg, _compat_int_types): prefixlen = arg else: prefixlen = cls._prefix_from_prefix_string(arg) netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen)) cls._netmask_cache[arg] = netmask, prefixlen return cls._netmask_cache[arg] @classmethod def _ip_int_from_string(cls, ip_str): """Turn an IPv6 ip_str into an integer. Args: ip_str: A string, the IPv6 ip_str. Returns: An int, the IPv6 address Raises: AddressValueError: if ip_str isn't a valid IPv6 Address. """ if not ip_str: raise AddressValueError('Address cannot be empty') parts = ip_str.split(':') # An IPv6 address needs at least 2 colons (3 parts). _min_parts = 3 if len(parts) < _min_parts: msg = "At least %d parts expected in %r" % (_min_parts, ip_str) raise AddressValueError(msg) # If the address has an IPv4-style suffix, convert it to hexadecimal. if '.' in parts[-1]: try: ipv4_int = IPv4Address(parts.pop())._ip except AddressValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str)) parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF)) parts.append('%x' % (ipv4_int & 0xFFFF)) # An IPv6 address can't have more than 8 colons (9 parts). # The extra colon comes from using the "::" notation for a single # leading or trailing zero part. 
_max_parts = cls._HEXTET_COUNT + 1 if len(parts) > _max_parts: msg = "At most %d colons permitted in %r" % ( _max_parts - 1, ip_str) raise AddressValueError(msg) # Disregarding the endpoints, find '::' with nothing in between. # This indicates that a run of zeroes has been skipped. skip_index = None for i in _compat_range(1, len(parts) - 1): if not parts[i]: if skip_index is not None: # Can't have more than one '::' msg = "At most one '::' permitted in %r" % ip_str raise AddressValueError(msg) skip_index = i # parts_hi is the number of parts to copy from above/before the '::' # parts_lo is the number of parts to copy from below/after the '::' if skip_index is not None: # If we found a '::', then check if it also covers the endpoints. parts_hi = skip_index parts_lo = len(parts) - skip_index - 1 if not parts[0]: parts_hi -= 1 if parts_hi: msg = "Leading ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # ^: requires ^:: if not parts[-1]: parts_lo -= 1 if parts_lo: msg = "Trailing ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # :$ requires ::$ parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) if parts_skipped < 1: msg = "Expected at most %d other parts with '::' in %r" raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str)) else: # Otherwise, allocate the entire address to parts_hi. The # endpoints could still be empty, but _parse_hextet() will check # for that. if len(parts) != cls._HEXTET_COUNT: msg = "Exactly %d parts expected without '::' in %r" raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) if not parts[0]: msg = "Leading ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # ^: requires ^:: if not parts[-1]: msg = "Trailing ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # :$ requires ::$ parts_hi = len(parts) parts_lo = 0 parts_skipped = 0 try: # Now, parse the hextets into a 128-bit integer. 
ip_int = 0 for i in range(parts_hi): ip_int <<= 16 ip_int |= cls._parse_hextet(parts[i]) ip_int <<= 16 * parts_skipped for i in range(-parts_lo, 0): ip_int <<= 16 ip_int |= cls._parse_hextet(parts[i]) return ip_int except ValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str)) @classmethod def _parse_hextet(cls, hextet_str): """Convert an IPv6 hextet string into an integer. Args: hextet_str: A string, the number to parse. Returns: The hextet as an integer. Raises: ValueError: if the input isn't strictly a hex number from [0..FFFF]. """ # Whitelist the characters, since int() allows a lot of bizarre stuff. if not cls._HEX_DIGITS.issuperset(hextet_str): raise ValueError("Only hex digits permitted in %r" % hextet_str) # We do the length check second, since the invalid character error # is likely to be more informative for the user if len(hextet_str) > 4: msg = "At most 4 characters permitted in %r" raise ValueError(msg % hextet_str) # Length check means we can skip checking the integer value return int(hextet_str, 16) @classmethod def _compress_hextets(cls, hextets): """Compresses a list of hextets. Compresses a list of strings, replacing the longest continuous sequence of "0" in the list with "" and adding empty strings at the beginning or at the end of the string such that subsequently calling ":".join(hextets) will produce the compressed version of the IPv6 address. Args: hextets: A list of strings, the hextets to compress. Returns: A list of strings. """ best_doublecolon_start = -1 best_doublecolon_len = 0 doublecolon_start = -1 doublecolon_len = 0 for index, hextet in enumerate(hextets): if hextet == '0': doublecolon_len += 1 if doublecolon_start == -1: # Start of a sequence of zeros. doublecolon_start = index if doublecolon_len > best_doublecolon_len: # This is the longest sequence of zeros so far. 
best_doublecolon_len = doublecolon_len best_doublecolon_start = doublecolon_start else: doublecolon_len = 0 doublecolon_start = -1 if best_doublecolon_len > 1: best_doublecolon_end = (best_doublecolon_start + best_doublecolon_len) # For zeros at the end of the address. if best_doublecolon_end == len(hextets): hextets += [''] hextets[best_doublecolon_start:best_doublecolon_end] = [''] # For zeros at the beginning of the address. if best_doublecolon_start == 0: hextets = [''] + hextets return hextets @classmethod def _string_from_ip_int(cls, ip_int=None): """Turns a 128-bit integer into hexadecimal notation. Args: ip_int: An integer, the IP address. Returns: A string, the hexadecimal representation of the address. Raises: ValueError: The address is bigger than 128 bits of all ones. """ if ip_int is None: ip_int = int(cls._ip) if ip_int > cls._ALL_ONES: raise ValueError('IPv6 address is too large') hex_str = '%032x' % ip_int hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)] hextets = cls._compress_hextets(hextets) return ':'.join(hextets) def _explode_shorthand_ip_string(self): """Expand a shortened IPv6 address. Args: ip_str: A string, the IPv6 address. Returns: A string, the expanded IPv6 address. """ if isinstance(self, IPv6Network): ip_str = _compat_str(self.network_address) elif isinstance(self, IPv6Interface): ip_str = _compat_str(self.ip) else: ip_str = _compat_str(self) ip_int = self._ip_int_from_string(ip_str) hex_str = '%032x' % ip_int parts = [hex_str[x:x + 4] for x in range(0, 32, 4)] if isinstance(self, (_BaseNetwork, IPv6Interface)): return '%s/%d' % (':'.join(parts), self._prefixlen) return ':'.join(parts) def _reverse_pointer(self): """Return the reverse DNS pointer name for the IPv6 address. This implements the method described in RFC3596 2.5. 
""" reverse_chars = self.exploded[::-1].replace(':', '') return '.'.join(reverse_chars) + '.ip6.arpa' @property def max_prefixlen(self): return self._max_prefixlen @property def version(self): return self._version class IPv6Address(_BaseV6, _BaseAddress): """Represent and manipulate single IPv6 Addresses.""" __slots__ = ('_ip', '__weakref__') def __init__(self, address): """Instantiate a new IPv6 address object. Args: address: A string or integer representing the IP Additionally, an integer can be passed, so IPv6Address('2001:db8::') == IPv6Address(42540766411282592856903984951653826560) or, more generally IPv6Address(int(IPv6Address('2001:db8::'))) == IPv6Address('2001:db8::') Raises: AddressValueError: If address isn't a valid IPv6 address. """ # Efficient constructor from integer. if isinstance(address, _compat_int_types): self._check_int_address(address) self._ip = address return # Constructing from a packed address if isinstance(address, bytes): self._check_packed_address(address, 16) bvs = _compat_bytes_to_byte_vals(address) self._ip = _compat_int_from_byte_vals(bvs, 'big') return # Assume input argument to be string or any object representation # which converts into a formatted IP string. addr_str = _compat_str(address) if '/' in addr_str: raise AddressValueError("Unexpected '/' in %r" % address) self._ip = self._ip_int_from_string(addr_str) @property def packed(self): """The binary representation of this address.""" return v6_int_to_packed(self._ip) @property def is_multicast(self): """Test if the address is reserved for multicast use. Returns: A boolean, True if the address is a multicast address. See RFC 2373 2.7 for details. """ return self in self._constants._multicast_network @property def is_reserved(self): """Test if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within one of the reserved IPv6 Network ranges. 
""" return any(self in x for x in self._constants._reserved_networks) @property def is_link_local(self): """Test if the address is reserved for link-local. Returns: A boolean, True if the address is reserved per RFC 4291. """ return self in self._constants._linklocal_network @property def is_site_local(self): """Test if the address is reserved for site-local. Note that the site-local address space has been deprecated by RFC 3879. Use is_private to test if this address is in the space of unique local addresses as defined by RFC 4193. Returns: A boolean, True if the address is reserved per RFC 3513 2.5.6. """ return self in self._constants._sitelocal_network @property def is_private(self): """Test if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv6-special-registry. """ return any(self in net for net in self._constants._private_networks) @property def is_global(self): """Test if this address is allocated for public networks. Returns: A boolean, true if the address is not reserved per iana-ipv6-special-registry. """ return not self.is_private @property def is_unspecified(self): """Test if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 2373 2.5.2. """ return self._ip == 0 @property def is_loopback(self): """Test if the address is a loopback address. Returns: A boolean, True if the address is a loopback address as defined in RFC 2373 2.5.3. """ return self._ip == 1 @property def ipv4_mapped(self): """Return the IPv4 mapped address. Returns: If the IPv6 address is a v4 mapped address, return the IPv4 mapped address. Return None otherwise. """ if (self._ip >> 32) != 0xFFFF: return None return IPv4Address(self._ip & 0xFFFFFFFF) @property def teredo(self): """Tuple of embedded teredo IPs. 
Returns: Tuple of the (server, client) IPs or None if the address doesn't appear to be a teredo address (doesn't start with 2001::/32) """ if (self._ip >> 96) != 0x20010000: return None return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), IPv4Address(~self._ip & 0xFFFFFFFF)) @property def sixtofour(self): """Return the IPv4 6to4 embedded address. Returns: The IPv4 6to4-embedded address if present or None if the address doesn't appear to contain a 6to4 embedded address. """ if (self._ip >> 112) != 0x2002: return None return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) class IPv6Interface(IPv6Address): def __init__(self, address): if isinstance(address, (bytes, _compat_int_types)): IPv6Address.__init__(self, address) self.network = IPv6Network(self._ip) self._prefixlen = self._max_prefixlen return if isinstance(address, tuple): IPv6Address.__init__(self, address[0]) if len(address) > 1: self._prefixlen = int(address[1]) else: self._prefixlen = self._max_prefixlen self.network = IPv6Network(address, strict=False) self.netmask = self.network.netmask self.hostmask = self.network.hostmask return addr = _split_optional_netmask(address) IPv6Address.__init__(self, addr[0]) self.network = IPv6Network(address, strict=False) self.netmask = self.network.netmask self._prefixlen = self.network._prefixlen self.hostmask = self.network.hostmask def __str__(self): return '%s/%d' % (self._string_from_ip_int(self._ip), self.network.prefixlen) def __eq__(self, other): address_equal = IPv6Address.__eq__(self, other) if not address_equal or address_equal is NotImplemented: return address_equal try: return self.network == other.network except AttributeError: # An interface with an associated network is NOT the # same as an unassociated address. That's why the hash # takes the extra info into account. 
return False def __lt__(self, other): address_less = IPv6Address.__lt__(self, other) if address_less is NotImplemented: return NotImplemented try: return self.network < other.network except AttributeError: # We *do* allow addresses and interfaces to be sorted. The # unassociated address is considered less than all interfaces. return False def __hash__(self): return self._ip ^ self._prefixlen ^ int(self.network.network_address) __reduce__ = _IPAddressBase.__reduce__ @property def ip(self): return IPv6Address(self._ip) @property def with_prefixlen(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self._prefixlen) @property def with_netmask(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self.netmask) @property def with_hostmask(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self.hostmask) @property def is_unspecified(self): return self._ip == 0 and self.network.is_unspecified @property def is_loopback(self): return self._ip == 1 and self.network.is_loopback class IPv6Network(_BaseV6, _BaseNetwork): """This class represents and manipulates 128-bit IPv6 networks. Attributes: [examples for IPv6('2001:db8::1000/124')] .network_address: IPv6Address('2001:db8::1000') .hostmask: IPv6Address('::f') .broadcast_address: IPv6Address('2001:db8::100f') .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') .prefixlen: 124 """ # Class to use when creating address objects _address_class = IPv6Address def __init__(self, address, strict=True): """Instantiate a new IPv6 Network object. Args: address: A string or integer representing the IPv6 network or the IP and prefix/netmask. '2001:db8::/128' '2001:db8:0000:0000:0000:0000:0000:0000/128' '2001:db8::' are all functionally the same in IPv6. That is to say, failing to provide a subnetmask will create an object with a mask of /128. 
Additionally, an integer can be passed, so IPv6Network('2001:db8::') == IPv6Network(42540766411282592856903984951653826560) or, more generally IPv6Network(int(IPv6Network('2001:db8::'))) == IPv6Network('2001:db8::') strict: A boolean. If true, ensure that we have been passed A true network address, eg, 2001:db8::1000/124 and not an IP address on a network, eg, 2001:db8::1/124. Raises: AddressValueError: If address isn't a valid IPv6 address. NetmaskValueError: If the netmask isn't valid for an IPv6 address. ValueError: If strict was True and a network address was not supplied. """ _BaseNetwork.__init__(self, address) # Efficient constructor from integer or packed address if isinstance(address, (bytes, _compat_int_types)): self.network_address = IPv6Address(address) self.netmask, self._prefixlen = self._make_netmask( self._max_prefixlen) return if isinstance(address, tuple): if len(address) > 1: arg = address[1] else: arg = self._max_prefixlen self.netmask, self._prefixlen = self._make_netmask(arg) self.network_address = IPv6Address(address[0]) packed = int(self.network_address) if packed & int(self.netmask) != packed: if strict: raise ValueError('%s has host bits set' % self) else: self.network_address = IPv6Address(packed & int(self.netmask)) return # Assume input argument to be string or any object representation # which converts into a formatted IP prefix string. addr = _split_optional_netmask(address) self.network_address = IPv6Address(self._ip_int_from_string(addr[0])) if len(addr) == 2: arg = addr[1] else: arg = self._max_prefixlen self.netmask, self._prefixlen = self._make_netmask(arg) if strict: if (IPv6Address(int(self.network_address) & int(self.netmask)) != self.network_address): raise ValueError('%s has host bits set' % self) self.network_address = IPv6Address(int(self.network_address) & int(self.netmask)) if self._prefixlen == (self._max_prefixlen - 1): self.hosts = self.__iter__ def hosts(self): """Generate Iterator over usable hosts in a network. 
This is like __iter__ except it doesn't return the Subnet-Router anycast address. """ network = int(self.network_address) broadcast = int(self.broadcast_address) for x in _compat_range(network + 1, broadcast + 1): yield self._address_class(x) @property def is_site_local(self): """Test if the address is reserved for site-local. Note that the site-local address space has been deprecated by RFC 3879. Use is_private to test if this address is in the space of unique local addresses as defined by RFC 4193. Returns: A boolean, True if the address is reserved per RFC 3513 2.5.6. """ return (self.network_address.is_site_local and self.broadcast_address.is_site_local) class _IPv6Constants(object): _linklocal_network = IPv6Network('fe80::/10') _multicast_network = IPv6Network('ff00::/8') _private_networks = [ IPv6Network('::1/128'), IPv6Network('::/128'), IPv6Network('::ffff:0:0/96'), IPv6Network('100::/64'), IPv6Network('2001::/23'), IPv6Network('2001:2::/48'), IPv6Network('2001:db8::/32'), IPv6Network('2001:10::/28'), IPv6Network('fc00::/7'), IPv6Network('fe80::/10'), ] _reserved_networks = [ IPv6Network('::/8'), IPv6Network('100::/8'), IPv6Network('200::/7'), IPv6Network('400::/6'), IPv6Network('800::/5'), IPv6Network('1000::/4'), IPv6Network('4000::/3'), IPv6Network('6000::/3'), IPv6Network('8000::/3'), IPv6Network('A000::/3'), IPv6Network('C000::/3'), IPv6Network('E000::/4'), IPv6Network('F000::/5'), IPv6Network('F800::/6'), IPv6Network('FE00::/9'), ] _sitelocal_network = IPv6Network('fec0::/10') IPv6Address._constants = _IPv6Constants PK.e[P1webencodings/mklabels.pynu[""" webencodings.mklabels ~~~~~~~~~~~~~~~~~~~~~ Regenarate the webencodings.labels module. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. 
""" import json try: from urllib import urlopen except ImportError: from urllib.request import urlopen def assert_lower(string): assert string == string.lower() return string def generate(url): parts = ['''\ """ webencodings.labels ~~~~~~~~~~~~~~~~~~~ Map encoding labels to their name. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ # XXX Do not edit! # This file is automatically generated by mklabels.py LABELS = { '''] labels = [ (repr(assert_lower(label)).lstrip('u'), repr(encoding['name']).lstrip('u')) for category in json.loads(urlopen(url).read().decode('ascii')) for encoding in category['encodings'] for label in encoding['labels']] max_len = max(len(label) for label, name in labels) parts.extend( ' %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name) for label, name in labels) parts.append('}') return ''.join(parts) if __name__ == '__main__': print(generate('http://encoding.spec.whatwg.org/encodings.json')) PK.e[ webencodings/mklabels.pyonu[ abc@szdZddlZyddlmZWn!ek rIddlmZnXdZdZedkrvedGHndS(s webencodings.mklabels ~~~~~~~~~~~~~~~~~~~~~ Regenarate the webencodings.labels module. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. iN(turlopencCs|S(N((tstring((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.pyt assert_lowerscsdg}gtjt|jjdD]\}|dD]K}|dD]:}tt|jdt|djdf^qJq<q.}td|D|j fd|D|j d d j |S( Ns""" webencodings.labels ~~~~~~~~~~~~~~~~~~~ Map encoding labels to their name. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ # XXX Do not edit! 
# This file is automatically generated by mklabels.py LABELS = { tasciit encodingstlabelstutnamecss!|]\}}t|VqdS(N(tlen(t.0tlabelR((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.pys 2sc3s6|],\}}d|dt||fVqdS(s %s:%s %s, t N(R(R R R(tmax_len(sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.pys 4st}t( tjsontloadsRtreadtdecodetreprRtlstriptmaxtextendtappendtjoin(turltpartstcategorytencodingR R((R sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.pytgenerates (N   t__main__s.http://encoding.spec.whatwg.org/encodings.json( t__doc__RturllibRt ImportErrorturllib.requestRRt__name__(((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.pyt s    ! PK.e[cz,,webencodings/__init__.pycnu[ abc@s6dZddlmZddlZddlmZdZidd6d d 6d d 6d d6ZiZdZ dZ dZ de fdYZ e dZe dZe dZddZdZeddZddZdZeddZdZd e fd!YZd"e fd#YZdS($u webencodings ~~~~~~~~~~~~ This is a Python implementation of the `WHATWG Encoding standard `. See README for details. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. i(tunicode_literalsNi(tLABELSu0.5u iso-8859-8u iso-8859-8-iu mac-cyrillicux-mac-cyrillicu mac-romanu macintoshucp874u windows-874cCs|jdjjdS(u9Transform (only) ASCII letters to lower case: A-Z is mapped to a-z. :param string: An Unicode string. :returns: A new Unicode string. This is used for `ASCII case-insensitive `_ matching of encoding labels. The same matching is also used, among other things, for `CSS keywords `_. 
This is different from the :meth:`~py:str.lower` method of Unicode strings which also affect non-ASCII characters, sometimes mapping them into the ASCII range: >>> keyword = u'Bac\N{KELVIN SIGN}ground' >>> assert keyword.lower() == u'background' >>> assert ascii_lower(keyword) != keyword.lower() >>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground' uutf8(tencodetlowertdecode(tstring((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt ascii_lower#scCst|jd}tj|}|dkr4dStj|}|dkr|dkrnddlm}n!tj||}t j |}t ||}|t|`_ algorithm. Supported labels are listed there. :param label: A string. :returns: An :class:`Encoding` object, or :obj:`None` for an unknown label. u ux-user-definedi(t codec_infoN( RtstripRtgettNonetCACHEtx_user_definedRt PYTHON_NAMEStcodecstlookuptEncoding(tlabeltnametencodingRt python_name((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyR=s     cCsBt|dr|St|}|dkr>td|n|S(u Accept either an encoding object or label. :param encoding: An :class:`Encoding` object or a label string. :returns: An :class:`Encoding` object. :raises: :exc:`~exceptions.LookupError` for an unknown label. u codec_infouUnknown encoding label: %rN(thasattrRR t LookupError(tencoding_or_labelR((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt _get_encoding[s   RcBs eZdZdZdZRS(uOReresents a character encoding such as UTF-8, that can be used for decoding or encoding. .. attribute:: name Canonical name of the encoding .. attribute:: codec_info The actual implementation of the encoding, a stdlib :class:`~codecs.CodecInfo` object. See :func:`codecs.register`. 
cCs||_||_dS(N(RR(tselfRR((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt__init__|s cCs d|jS(Nu (R(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt__repr__s(t__name__t __module__t__doc__RR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyRms uutf-8uutf-16leuutf-16beureplacecCsGt|}t|\}}|p'|}|jj||d|fS(u Decode a single string. :param input: A byte string :param fallback_encoding: An :class:`Encoding` object or a label string. The encoding to use if :obj:`input` does note have a BOM. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :return: A ``(output, encoding)`` tuple of an Unicode string and an :obj:`Encoding`. i(Rt _detect_bomRR(tinputtfallback_encodingterrorst bom_encodingR((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyRs  cCsa|jdrt|dfS|jdr:t|dfS|jdrWt|dfSd|fS(uBReturn (bom_encoding, input), with any BOM removed from the input.sissiN(t startswitht_UTF16LEt_UTF16BEtUTF8R (R ((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyRsustrictcCst|jj||dS(u; Encode a single string. :param input: An Unicode string. :param encoding: An :class:`Encoding` object or a label string. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :return: A byte string. i(RRR(R RR"((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyRs cCs4t||}t||}t|}||fS(u "Pull"-based decoder. :param input: An iterable of byte strings. The input is first consumed just enough to determine the encoding based on the precense of a BOM, then consumed on demand when the return value is. :param fallback_encoding: An :class:`Encoding` object or a label string. The encoding to use if :obj:`input` does note have a BOM. :param errors: Type of error handling. See :func:`codecs.register`. 
:raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :returns: An ``(output, encoding)`` tuple. :obj:`output` is an iterable of Unicode strings, :obj:`encoding` is the :obj:`Encoding` that is being used. (tIncrementalDecodert_iter_decode_generatortnext(R R!R"tdecodert generatorR((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt iter_decodes ccs|j}t|}x|D]>}||}|r|jdk sIt|jV|VPqqW|ddt}|jdk st|jV|r|VndSx(|D] }||}|r|VqqW|ddt}|r|VndS(uqReturn a generator that first yields the :obj:`Encoding`, then yields output chukns as Unicode strings. ttfinalN(RtiterRR tAssertionErrortTrue(R R+Rtchuncktoutput((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyR)s,       cCst||j}t||S(uY “Pull”-based encoder. :param input: An iterable of Unicode strings. :param encoding: An :class:`Encoding` object or a label string. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :returns: An iterable of byte strings. (tIncrementalEncoderRt_iter_encode_generator(R RR"R((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt iter_encodes ccsOx(|D] }||}|r|VqqW|ddt}|rK|VndS(NuR/(R2(R RR3R4((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyR6s   R(cBs&eZdZddZedZRS(uO “Push”-based decoder. :param fallback_encoding: An :class:`Encoding` object or a label string. The encoding to use if :obj:`input` does note have a BOM. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. ureplacecCs7t||_||_d|_d|_d|_dS(NR.(Rt_fallback_encodingt_errorst_bufferR t_decoderR(RR!R"((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyRs    cCs|j}|dk r"|||S|j|}t|\}}|dkrt|dkrs| rs||_dS|j}n|jj|jj }||_||_ |||S(uDecode one chunk of the input. :param input: A byte string. :param final: Indicate that no more input is available. 
Must be :obj:`True` if this is the last call. :returns: An Unicode string. iuN( R;R R:RtlenR8RtincrementaldecoderR9RR(RR R/R+R((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyR's         (RRRRtFalseR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyR(s  R5cBseZdZeddZRS(u “Push”-based encoder. :param encoding: An :class:`Encoding` object or a label string. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. .. method:: encode(input, final=False) :param input: An Unicode string. :param final: Indicate that no more input is available. Must be :obj:`True` if this is the last call. :returns: A byte string. ustrictcCs(t|}|jj|j|_dS(N(RRtincrementalencoderR(RRR"((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyRTs (RRRR'R(((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyR5Cs(Rt __future__RRtlabelsRtVERSIONR R RRRtobjectRR'R%R&RRRR-R)R7R6R(R5(((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt s4             3PK.e["webencodings/mklabels.pycnu[ abc@szdZddlZyddlmZWn!ek rIddlmZnXdZdZedkrvedGHndS(s webencodings.mklabels ~~~~~~~~~~~~~~~~~~~~~ Regenarate the webencodings.labels module. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. iN(turlopencCs||jkst|S(N(tlowertAssertionError(tstring((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.pyt assert_lowerscsdg}gtjt|jjdD]\}|dD]K}|dD]:}tt|jdt|djdf^qJq<q.}td|D|j fd|D|j d d j |S( Ns""" webencodings.labels ~~~~~~~~~~~~~~~~~~~ Map encoding labels to their name. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ # XXX Do not edit! 
# This file is automatically generated by mklabels.py LABELS = { tasciit encodingstlabelstutnamecss!|]\}}t|VqdS(N(tlen(t.0tlabelR ((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.pys 2sc3s6|],\}}d|dt||fVqdS(s %s:%s %s, t N(R (R R R (tmax_len(sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.pys 4st}t( tjsontloadsRtreadtdecodetreprRtlstriptmaxtextendtappendtjoin(turltpartstcategorytencodingR R((RsE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.pytgenerates (N   t__main__s.http://encoding.spec.whatwg.org/encodings.json( t__doc__RturllibRt ImportErrorturllib.requestRRt__name__(((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.pyt s    ! PK.e[!webencodings/labels.pycnu[ abc@sdZidd6dd6dd6dd6dd6dd6dd6dd 6dd6dd 6dd 6dd 6dd 6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6dd!6dd"6d#d$6d#d%6d#d#6d#d&6d#d'6d#d(6d#d)6d#d*6d+d,6d+d-6d+d.6d+d/6d+d06d+d16d+d+6d+d26d+d36d+d46d+d56d+d66d+d76d+d86d9d:6d9d;6d9d<6d9d=6d9d>6d9d96d9d?6d9d@6d9dA6d9dB6d9dC6d9dD6dEdF6dEdG6dEdH6dEdE6dEdI6dEdJ6dEdK6dEdL6dEdM6dEdN6dEdO6dPdQ6dPdP6dPdR6dSdT6dSdS6dSdU6dSdV6dSdW6dSdX6dSdY6dZdZ6dZd[6dZd\6d]d]6d]d^6d]d_6d`da6d`d`6d`db6d`dc6d`dd6d`de6dfdf6dgdh6dgdi6dgdj6dgdg6dgdk6dldl6dmdn6dmdo6dmdm6dmdp6dqdr6dqds6dqdt6dqdu6dqdv6dqdq6dwdx6dwdw6dwdy6dzd{6dzdz6dzd|6d}d~6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d}6d}d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6ZdS(s webencodings.labels ~~~~~~~~~~~~~~~~~~~ Map encoding labels to their name. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. 
sutf-8sunicode-1-1-utf-8tutf8tibm866t866tcp866tcsibm866s iso-8859-2t csisolatin2s iso-ir-101s iso8859-2tiso88592s iso_8859-2siso_8859-2:1987tl2tlatin2s iso-8859-3t csisolatin3s iso-ir-109s iso8859-3tiso88593s iso_8859-3siso_8859-3:1988tl3tlatin3s iso-8859-4t csisolatin4s iso-ir-110s iso8859-4tiso88594s iso_8859-4siso_8859-4:1988tl4tlatin4s iso-8859-5tcsisolatincyrillictcyrillics iso-ir-144s iso8859-5tiso88595s iso_8859-5siso_8859-5:1988s iso-8859-6tarabicsasmo-708t csiso88596et csiso88596itcsisolatinarabicsecma-114s iso-8859-6-es iso-8859-6-is iso-ir-127s iso8859-6tiso88596s iso_8859-6siso_8859-6:1987s iso-8859-7tcsisolatingreeksecma-118telot_928tgreektgreek8s iso-ir-126s iso8859-7tiso88597s iso_8859-7siso_8859-7:1987t sun_eu_greeks iso-8859-8t csiso88598etcsisolatinhebrewthebrews iso-8859-8-es iso-ir-138s iso8859-8tiso88598s iso_8859-8siso_8859-8:1988tvisuals iso-8859-8-it csiso88598itlogicals iso-8859-10t csisolatin6s iso-ir-157s iso8859-10t iso885910tl6tlatin6s iso-8859-13s iso8859-13t iso885913s iso-8859-14s iso8859-14t iso885914s iso-8859-15t csisolatin9s iso8859-15t iso885915s iso_8859-15tl9s iso-8859-16skoi8-rtcskoi8rtkoitkoi8tkoi8_rskoi8-ut macintosht csmacintoshtmacs x-mac-romans windows-874sdos-874s iso-8859-11s iso8859-11t iso885911stis-620s windows-1250tcp1250sx-cp1250s windows-1251tcp1251sx-cp1251s windows-1252sansi_x3.4-1968tasciitcp1252tcp819t csisolatin1tibm819s iso-8859-1s iso-ir-100s iso8859-1tiso88591s iso_8859-1siso_8859-1:1987tl1tlatin1sus-asciisx-cp1252s windows-1253tcp1253sx-cp1253s windows-1254tcp1254t csisolatin5s iso-8859-9s iso-ir-148s iso8859-9tiso88599s iso_8859-9siso_8859-9:1989tl5tlatin5sx-cp1254s windows-1255tcp1255sx-cp1255s windows-1256tcp1256sx-cp1256s windows-1257tcp1257sx-cp1257s windows-1258tcp1258sx-cp1258sx-mac-cyrillicsx-mac-ukrainiantgbktchinesetcsgb2312tcsiso58gb231280tgb2312tgb_2312s gb_2312-80s iso-ir-58sx-gbktgb18030s hz-gb-2312tbig5s big5-hkscsscn-big5tcsbig5sx-x-big5seuc-jptcseucpkdfmtjapanesesx-euc-jps iso-2022-jpt 
csiso2022jpt shift_jist csshiftjistms_kanjis shift-jistsjiss windows-31jsx-sjisseuc-krtcseuckrt csksc56011987s iso-ir-149tkoreansks_c_5601-1987sks_c_5601-1989tksc5601tksc_5601s windows-949s iso-2022-krt csiso2022krsutf-16besutf-16lesutf-16sx-user-definedN(t__doc__tLABELS(((sC/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/labels.pyt sPK.e[ǭ##webencodings/labels.pynu[""" webencodings.labels ~~~~~~~~~~~~~~~~~~~ Map encoding labels to their name. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ # XXX Do not edit! # This file is automatically generated by mklabels.py LABELS = { 'unicode-1-1-utf-8': 'utf-8', 'utf-8': 'utf-8', 'utf8': 'utf-8', '866': 'ibm866', 'cp866': 'ibm866', 'csibm866': 'ibm866', 'ibm866': 'ibm866', 'csisolatin2': 'iso-8859-2', 'iso-8859-2': 'iso-8859-2', 'iso-ir-101': 'iso-8859-2', 'iso8859-2': 'iso-8859-2', 'iso88592': 'iso-8859-2', 'iso_8859-2': 'iso-8859-2', 'iso_8859-2:1987': 'iso-8859-2', 'l2': 'iso-8859-2', 'latin2': 'iso-8859-2', 'csisolatin3': 'iso-8859-3', 'iso-8859-3': 'iso-8859-3', 'iso-ir-109': 'iso-8859-3', 'iso8859-3': 'iso-8859-3', 'iso88593': 'iso-8859-3', 'iso_8859-3': 'iso-8859-3', 'iso_8859-3:1988': 'iso-8859-3', 'l3': 'iso-8859-3', 'latin3': 'iso-8859-3', 'csisolatin4': 'iso-8859-4', 'iso-8859-4': 'iso-8859-4', 'iso-ir-110': 'iso-8859-4', 'iso8859-4': 'iso-8859-4', 'iso88594': 'iso-8859-4', 'iso_8859-4': 'iso-8859-4', 'iso_8859-4:1988': 'iso-8859-4', 'l4': 'iso-8859-4', 'latin4': 'iso-8859-4', 'csisolatincyrillic': 'iso-8859-5', 'cyrillic': 'iso-8859-5', 'iso-8859-5': 'iso-8859-5', 'iso-ir-144': 'iso-8859-5', 'iso8859-5': 'iso-8859-5', 'iso88595': 'iso-8859-5', 'iso_8859-5': 'iso-8859-5', 'iso_8859-5:1988': 'iso-8859-5', 'arabic': 'iso-8859-6', 'asmo-708': 'iso-8859-6', 'csiso88596e': 'iso-8859-6', 'csiso88596i': 'iso-8859-6', 'csisolatinarabic': 'iso-8859-6', 'ecma-114': 'iso-8859-6', 'iso-8859-6': 'iso-8859-6', 'iso-8859-6-e': 'iso-8859-6', 'iso-8859-6-i': 'iso-8859-6', 
'iso-ir-127': 'iso-8859-6', 'iso8859-6': 'iso-8859-6', 'iso88596': 'iso-8859-6', 'iso_8859-6': 'iso-8859-6', 'iso_8859-6:1987': 'iso-8859-6', 'csisolatingreek': 'iso-8859-7', 'ecma-118': 'iso-8859-7', 'elot_928': 'iso-8859-7', 'greek': 'iso-8859-7', 'greek8': 'iso-8859-7', 'iso-8859-7': 'iso-8859-7', 'iso-ir-126': 'iso-8859-7', 'iso8859-7': 'iso-8859-7', 'iso88597': 'iso-8859-7', 'iso_8859-7': 'iso-8859-7', 'iso_8859-7:1987': 'iso-8859-7', 'sun_eu_greek': 'iso-8859-7', 'csiso88598e': 'iso-8859-8', 'csisolatinhebrew': 'iso-8859-8', 'hebrew': 'iso-8859-8', 'iso-8859-8': 'iso-8859-8', 'iso-8859-8-e': 'iso-8859-8', 'iso-ir-138': 'iso-8859-8', 'iso8859-8': 'iso-8859-8', 'iso88598': 'iso-8859-8', 'iso_8859-8': 'iso-8859-8', 'iso_8859-8:1988': 'iso-8859-8', 'visual': 'iso-8859-8', 'csiso88598i': 'iso-8859-8-i', 'iso-8859-8-i': 'iso-8859-8-i', 'logical': 'iso-8859-8-i', 'csisolatin6': 'iso-8859-10', 'iso-8859-10': 'iso-8859-10', 'iso-ir-157': 'iso-8859-10', 'iso8859-10': 'iso-8859-10', 'iso885910': 'iso-8859-10', 'l6': 'iso-8859-10', 'latin6': 'iso-8859-10', 'iso-8859-13': 'iso-8859-13', 'iso8859-13': 'iso-8859-13', 'iso885913': 'iso-8859-13', 'iso-8859-14': 'iso-8859-14', 'iso8859-14': 'iso-8859-14', 'iso885914': 'iso-8859-14', 'csisolatin9': 'iso-8859-15', 'iso-8859-15': 'iso-8859-15', 'iso8859-15': 'iso-8859-15', 'iso885915': 'iso-8859-15', 'iso_8859-15': 'iso-8859-15', 'l9': 'iso-8859-15', 'iso-8859-16': 'iso-8859-16', 'cskoi8r': 'koi8-r', 'koi': 'koi8-r', 'koi8': 'koi8-r', 'koi8-r': 'koi8-r', 'koi8_r': 'koi8-r', 'koi8-u': 'koi8-u', 'csmacintosh': 'macintosh', 'mac': 'macintosh', 'macintosh': 'macintosh', 'x-mac-roman': 'macintosh', 'dos-874': 'windows-874', 'iso-8859-11': 'windows-874', 'iso8859-11': 'windows-874', 'iso885911': 'windows-874', 'tis-620': 'windows-874', 'windows-874': 'windows-874', 'cp1250': 'windows-1250', 'windows-1250': 'windows-1250', 'x-cp1250': 'windows-1250', 'cp1251': 'windows-1251', 'windows-1251': 'windows-1251', 'x-cp1251': 'windows-1251', 
'ansi_x3.4-1968': 'windows-1252', 'ascii': 'windows-1252', 'cp1252': 'windows-1252', 'cp819': 'windows-1252', 'csisolatin1': 'windows-1252', 'ibm819': 'windows-1252', 'iso-8859-1': 'windows-1252', 'iso-ir-100': 'windows-1252', 'iso8859-1': 'windows-1252', 'iso88591': 'windows-1252', 'iso_8859-1': 'windows-1252', 'iso_8859-1:1987': 'windows-1252', 'l1': 'windows-1252', 'latin1': 'windows-1252', 'us-ascii': 'windows-1252', 'windows-1252': 'windows-1252', 'x-cp1252': 'windows-1252', 'cp1253': 'windows-1253', 'windows-1253': 'windows-1253', 'x-cp1253': 'windows-1253', 'cp1254': 'windows-1254', 'csisolatin5': 'windows-1254', 'iso-8859-9': 'windows-1254', 'iso-ir-148': 'windows-1254', 'iso8859-9': 'windows-1254', 'iso88599': 'windows-1254', 'iso_8859-9': 'windows-1254', 'iso_8859-9:1989': 'windows-1254', 'l5': 'windows-1254', 'latin5': 'windows-1254', 'windows-1254': 'windows-1254', 'x-cp1254': 'windows-1254', 'cp1255': 'windows-1255', 'windows-1255': 'windows-1255', 'x-cp1255': 'windows-1255', 'cp1256': 'windows-1256', 'windows-1256': 'windows-1256', 'x-cp1256': 'windows-1256', 'cp1257': 'windows-1257', 'windows-1257': 'windows-1257', 'x-cp1257': 'windows-1257', 'cp1258': 'windows-1258', 'windows-1258': 'windows-1258', 'x-cp1258': 'windows-1258', 'x-mac-cyrillic': 'x-mac-cyrillic', 'x-mac-ukrainian': 'x-mac-cyrillic', 'chinese': 'gbk', 'csgb2312': 'gbk', 'csiso58gb231280': 'gbk', 'gb2312': 'gbk', 'gb_2312': 'gbk', 'gb_2312-80': 'gbk', 'gbk': 'gbk', 'iso-ir-58': 'gbk', 'x-gbk': 'gbk', 'gb18030': 'gb18030', 'hz-gb-2312': 'hz-gb-2312', 'big5': 'big5', 'big5-hkscs': 'big5', 'cn-big5': 'big5', 'csbig5': 'big5', 'x-x-big5': 'big5', 'cseucpkdfmtjapanese': 'euc-jp', 'euc-jp': 'euc-jp', 'x-euc-jp': 'euc-jp', 'csiso2022jp': 'iso-2022-jp', 'iso-2022-jp': 'iso-2022-jp', 'csshiftjis': 'shift_jis', 'ms_kanji': 'shift_jis', 'shift-jis': 'shift_jis', 'shift_jis': 'shift_jis', 'sjis': 'shift_jis', 'windows-31j': 'shift_jis', 'x-sjis': 'shift_jis', 'cseuckr': 'euc-kr', 'csksc56011987': 
'euc-kr', 'euc-kr': 'euc-kr', 'iso-ir-149': 'euc-kr', 'korean': 'euc-kr', 'ks_c_5601-1987': 'euc-kr', 'ks_c_5601-1989': 'euc-kr', 'ksc5601': 'euc-kr', 'ksc_5601': 'euc-kr', 'windows-949': 'euc-kr', 'csiso2022kr': 'iso-2022-kr', 'iso-2022-kr': 'iso-2022-kr', 'utf-16be': 'utf-16be', 'utf-16': 'utf-16le', 'utf-16le': 'utf-16le', 'x-user-defined': 'x-user-defined', } PK.e[!webencodings/labels.pyonu[ abc@sdZidd6dd6dd6dd6dd6dd6dd6dd 6dd6dd 6dd 6dd 6dd 6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6dd!6dd"6d#d$6d#d%6d#d#6d#d&6d#d'6d#d(6d#d)6d#d*6d+d,6d+d-6d+d.6d+d/6d+d06d+d16d+d+6d+d26d+d36d+d46d+d56d+d66d+d76d+d86d9d:6d9d;6d9d<6d9d=6d9d>6d9d96d9d?6d9d@6d9dA6d9dB6d9dC6d9dD6dEdF6dEdG6dEdH6dEdE6dEdI6dEdJ6dEdK6dEdL6dEdM6dEdN6dEdO6dPdQ6dPdP6dPdR6dSdT6dSdS6dSdU6dSdV6dSdW6dSdX6dSdY6dZdZ6dZd[6dZd\6d]d]6d]d^6d]d_6d`da6d`d`6d`db6d`dc6d`dd6d`de6dfdf6dgdh6dgdi6dgdj6dgdg6dgdk6dldl6dmdn6dmdo6dmdm6dmdp6dqdr6dqds6dqdt6dqdu6dqdv6dqdq6dwdx6dwdw6dwdy6dzd{6dzdz6dzd|6d}d~6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d6d}d}6d}d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6ZdS(s webencodings.labels ~~~~~~~~~~~~~~~~~~~ Map encoding labels to their name. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. 
sutf-8sunicode-1-1-utf-8tutf8tibm866t866tcp866tcsibm866s iso-8859-2t csisolatin2s iso-ir-101s iso8859-2tiso88592s iso_8859-2siso_8859-2:1987tl2tlatin2s iso-8859-3t csisolatin3s iso-ir-109s iso8859-3tiso88593s iso_8859-3siso_8859-3:1988tl3tlatin3s iso-8859-4t csisolatin4s iso-ir-110s iso8859-4tiso88594s iso_8859-4siso_8859-4:1988tl4tlatin4s iso-8859-5tcsisolatincyrillictcyrillics iso-ir-144s iso8859-5tiso88595s iso_8859-5siso_8859-5:1988s iso-8859-6tarabicsasmo-708t csiso88596et csiso88596itcsisolatinarabicsecma-114s iso-8859-6-es iso-8859-6-is iso-ir-127s iso8859-6tiso88596s iso_8859-6siso_8859-6:1987s iso-8859-7tcsisolatingreeksecma-118telot_928tgreektgreek8s iso-ir-126s iso8859-7tiso88597s iso_8859-7siso_8859-7:1987t sun_eu_greeks iso-8859-8t csiso88598etcsisolatinhebrewthebrews iso-8859-8-es iso-ir-138s iso8859-8tiso88598s iso_8859-8siso_8859-8:1988tvisuals iso-8859-8-it csiso88598itlogicals iso-8859-10t csisolatin6s iso-ir-157s iso8859-10t iso885910tl6tlatin6s iso-8859-13s iso8859-13t iso885913s iso-8859-14s iso8859-14t iso885914s iso-8859-15t csisolatin9s iso8859-15t iso885915s iso_8859-15tl9s iso-8859-16skoi8-rtcskoi8rtkoitkoi8tkoi8_rskoi8-ut macintosht csmacintoshtmacs x-mac-romans windows-874sdos-874s iso-8859-11s iso8859-11t iso885911stis-620s windows-1250tcp1250sx-cp1250s windows-1251tcp1251sx-cp1251s windows-1252sansi_x3.4-1968tasciitcp1252tcp819t csisolatin1tibm819s iso-8859-1s iso-ir-100s iso8859-1tiso88591s iso_8859-1siso_8859-1:1987tl1tlatin1sus-asciisx-cp1252s windows-1253tcp1253sx-cp1253s windows-1254tcp1254t csisolatin5s iso-8859-9s iso-ir-148s iso8859-9tiso88599s iso_8859-9siso_8859-9:1989tl5tlatin5sx-cp1254s windows-1255tcp1255sx-cp1255s windows-1256tcp1256sx-cp1256s windows-1257tcp1257sx-cp1257s windows-1258tcp1258sx-cp1258sx-mac-cyrillicsx-mac-ukrainiantgbktchinesetcsgb2312tcsiso58gb231280tgb2312tgb_2312s gb_2312-80s iso-ir-58sx-gbktgb18030s hz-gb-2312tbig5s big5-hkscsscn-big5tcsbig5sx-x-big5seuc-jptcseucpkdfmtjapanesesx-euc-jps iso-2022-jpt 
csiso2022jpt shift_jist csshiftjistms_kanjis shift-jistsjiss windows-31jsx-sjisseuc-krtcseuckrt csksc56011987s iso-ir-149tkoreansks_c_5601-1987sks_c_5601-1989tksc5601tksc_5601s windows-949s iso-2022-krt csiso2022krsutf-16besutf-16lesutf-16sx-user-definedN(t__doc__tLABELS(((sC/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/labels.pyt sPK.e[>webencodings/tests.pynu[# coding: utf8 """ webencodings.tests ~~~~~~~~~~~~~~~~~~ A basic test suite for Encoding. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ from __future__ import unicode_literals from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode, IncrementalDecoder, IncrementalEncoder, UTF8) def assert_raises(exception, function, *args, **kwargs): try: function(*args, **kwargs) except exception: return else: # pragma: no cover raise AssertionError('Did not raise %s.' % exception) def test_labels(): assert lookup('utf-8').name == 'utf-8' assert lookup('Utf-8').name == 'utf-8' assert lookup('UTF-8').name == 'utf-8' assert lookup('utf8').name == 'utf-8' assert lookup('utf8').name == 'utf-8' assert lookup('utf8 ').name == 'utf-8' assert lookup(' \r\nutf8\t').name == 'utf-8' assert lookup('u8') is None # Python label. assert lookup('utf-8 ') is None # Non-ASCII white space. assert lookup('US-ASCII').name == 'windows-1252' assert lookup('iso-8859-1').name == 'windows-1252' assert lookup('latin1').name == 'windows-1252' assert lookup('LATIN1').name == 'windows-1252' assert lookup('latin-1') is None assert lookup('LATİN1') is None # ASCII-only case insensitivity. 
def test_all_labels(): for label in LABELS: assert decode(b'', label) == ('', lookup(label)) assert encode('', label) == b'' for repeat in [0, 1, 12]: output, _ = iter_decode([b''] * repeat, label) assert list(output) == [] assert list(iter_encode([''] * repeat, label)) == [] decoder = IncrementalDecoder(label) assert decoder.decode(b'') == '' assert decoder.decode(b'', final=True) == '' encoder = IncrementalEncoder(label) assert encoder.encode('') == b'' assert encoder.encode('', final=True) == b'' # All encoding names are valid labels too: for name in set(LABELS.values()): assert lookup(name).name == name def test_invalid_label(): assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid') assert_raises(LookupError, encode, 'é', 'invalid') assert_raises(LookupError, iter_decode, [], 'invalid') assert_raises(LookupError, iter_encode, [], 'invalid') assert_raises(LookupError, IncrementalDecoder, 'invalid') assert_raises(LookupError, IncrementalEncoder, 'invalid') def test_decode(): assert decode(b'\x80', 'latin1') == ('€', lookup('latin1')) assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1')) assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8')) assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8')) assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii')) assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8')) # UTF-8 with BOM assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be')) # UTF-16-BE with BOM assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le')) # UTF-16-LE with BOM assert decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be')) assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le')) assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be')) assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le')) assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le')) assert decode(b'\xe9\x00', 'UTF-16BE') == 
('\ue900', lookup('utf-16be')) assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le')) assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le')) def test_encode(): assert encode('é', 'latin1') == b'\xe9' assert encode('é', 'utf8') == b'\xc3\xa9' assert encode('é', 'utf8') == b'\xc3\xa9' assert encode('é', 'utf-16') == b'\xe9\x00' assert encode('é', 'utf-16le') == b'\xe9\x00' assert encode('é', 'utf-16be') == b'\x00\xe9' def test_iter_decode(): def iter_decode_to_string(input, fallback_encoding): output, _encoding = iter_decode(input, fallback_encoding) return ''.join(output) assert iter_decode_to_string([], 'latin1') == '' assert iter_decode_to_string([b''], 'latin1') == '' assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é' assert iter_decode_to_string([b'hello'], 'latin1') == 'hello' assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello' assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello' assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é' assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é' assert iter_decode_to_string([ b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é' assert iter_decode_to_string([ b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD' assert iter_decode_to_string([ b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é' assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == '' assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»' assert iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é' assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é' assert iter_decode_to_string([ b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é' assert iter_decode_to_string([ b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo' def test_iter_encode(): assert b''.join(iter_encode([], 'latin1')) == b'' assert b''.join(iter_encode([''], 'latin1')) == b'' assert b''.join(iter_encode(['é'], 'latin1')) == 
b'\xe9' assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9' assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00' assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00' assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9' assert b''.join(iter_encode([ '', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo' def test_x_user_defined(): encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca' decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca' encoded = b'aa' decoded = 'aa' assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined')) assert encode(decoded, 'x-user-defined') == encoded PK.e[9*0 webencodings/tests.pyonu[ abc@sdZddlmZddlmZmZmZmZmZm Z m Z m Z m Z dZ dZdZdZd Zd Zd Zd Zd ZdS(u webencodings.tests ~~~~~~~~~~~~~~~~~~ A basic test suite for Encoding. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. i(tunicode_literalsi( tlookuptLABELStdecodetencodet iter_decodet iter_encodetIncrementalDecodertIncrementalEncodertUTF8cOs:y|||Wn|k r%dSXtd|dS(NuDid not raise %s.(tAssertionError(t exceptiontfunctiontargstkwargs((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyt assert_raisess  cCsdS(N((((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyt test_labelsscCsx\tD]T}x3dddgD]"}tdg||\}}qWt|}t|}qWxttjD]}qrWdS(Niii t(RRRRtsettvalues(tlabeltrepeattoutputt_tdecodertencodertname((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyttest_all_labels0s   cCsptttddtttddtttgdtttgdtttdtttddS(Nséuinvalidué(Rt LookupErrorRRRRRR(((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyttest_invalid_labelCs cCsdS(N((((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyt test_decodeLscCsdS(N((((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyt test_encodebscCs d}dS(NcSs"t||\}}dj|S(Nu(Rtjoin(tinputtfallback_encodingRt 
_encoding((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pytiter_decode_to_stringls((R$((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyttest_iter_decodeks cCsdS(N((((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyttest_iter_encodescCsd}d}d}d}dS(Ns2, O#ɻtϨu2, O#ttaauaa((tencodedtdecoded((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyttest_x_user_defineds N(t__doc__t __future__RRRRRRRRRRR RRRRRRR%R&R*(((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyt s@     PK.e[ᅞ,,webencodings/__init__.pyonu[ abc@s6dZddlmZddlZddlmZdZidd6d d 6d d 6d d6ZiZdZ dZ dZ de fdYZ e dZe dZe dZddZdZeddZddZdZeddZdZd e fd!YZd"e fd#YZdS($u webencodings ~~~~~~~~~~~~ This is a Python implementation of the `WHATWG Encoding standard `. See README for details. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. i(tunicode_literalsNi(tLABELSu0.5u iso-8859-8u iso-8859-8-iu mac-cyrillicux-mac-cyrillicu mac-romanu macintoshucp874u windows-874cCs|jdjjdS(u9Transform (only) ASCII letters to lower case: A-Z is mapped to a-z. :param string: An Unicode string. :returns: A new Unicode string. This is used for `ASCII case-insensitive `_ matching of encoding labels. The same matching is also used, among other things, for `CSS keywords `_. This is different from the :meth:`~py:str.lower` method of Unicode strings which also affect non-ASCII characters, sometimes mapping them into the ASCII range: >>> keyword = u'Bac\N{KELVIN SIGN}ground' >>> assert keyword.lower() == u'background' >>> assert ascii_lower(keyword) != keyword.lower() >>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground' uutf8(tencodetlowertdecode(tstring((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt ascii_lower#scCst|jd}tj|}|dkr4dStj|}|dkr|dkrnddlm}n!tj||}t j |}t ||}|t|`_ algorithm. Supported labels are listed there. :param label: A string. 
:returns: An :class:`Encoding` object, or :obj:`None` for an unknown label. u ux-user-definedi(t codec_infoN( RtstripRtgettNonetCACHEtx_user_definedRt PYTHON_NAMEStcodecstlookuptEncoding(tlabeltnametencodingRt python_name((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyR=s     cCsBt|dr|St|}|dkr>td|n|S(u Accept either an encoding object or label. :param encoding: An :class:`Encoding` object or a label string. :returns: An :class:`Encoding` object. :raises: :exc:`~exceptions.LookupError` for an unknown label. u codec_infouUnknown encoding label: %rN(thasattrRR t LookupError(tencoding_or_labelR((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt _get_encoding[s   RcBs eZdZdZdZRS(uOReresents a character encoding such as UTF-8, that can be used for decoding or encoding. .. attribute:: name Canonical name of the encoding .. attribute:: codec_info The actual implementation of the encoding, a stdlib :class:`~codecs.CodecInfo` object. See :func:`codecs.register`. cCs||_||_dS(N(RR(tselfRR((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt__init__|s cCs d|jS(Nu (R(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt__repr__s(t__name__t __module__t__doc__RR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyRms uutf-8uutf-16leuutf-16beureplacecCsGt|}t|\}}|p'|}|jj||d|fS(u Decode a single string. :param input: A byte string :param fallback_encoding: An :class:`Encoding` object or a label string. The encoding to use if :obj:`input` does note have a BOM. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :return: A ``(output, encoding)`` tuple of an Unicode string and an :obj:`Encoding`. 
i(Rt _detect_bomRR(tinputtfallback_encodingterrorst bom_encodingR((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyRs  cCsa|jdrt|dfS|jdr:t|dfS|jdrWt|dfSd|fS(uBReturn (bom_encoding, input), with any BOM removed from the input.sissiN(t startswitht_UTF16LEt_UTF16BEtUTF8R (R ((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyRsustrictcCst|jj||dS(u; Encode a single string. :param input: An Unicode string. :param encoding: An :class:`Encoding` object or a label string. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :return: A byte string. i(RRR(R RR"((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyRs cCs4t||}t||}t|}||fS(u "Pull"-based decoder. :param input: An iterable of byte strings. The input is first consumed just enough to determine the encoding based on the precense of a BOM, then consumed on demand when the return value is. :param fallback_encoding: An :class:`Encoding` object or a label string. The encoding to use if :obj:`input` does note have a BOM. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :returns: An ``(output, encoding)`` tuple. :obj:`output` is an iterable of Unicode strings, :obj:`encoding` is the :obj:`Encoding` that is being used. (tIncrementalDecodert_iter_decode_generatortnext(R R!R"tdecodert generatorR((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt iter_decodes ccs|j}t|}x]|D])}||}|r|jV|VPqqW|ddt}|jV|rq|VndSx(|D] }||}|r||Vq|q|W|ddt}|r|VndS(uqReturn a generator that first yields the :obj:`Encoding`, then yields output chukns as Unicode strings. ttfinalN(RtiterRtTrue(R R+Rtchuncktoutput((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyR)s(       cCst||j}t||S(uY “Pull”-based encoder. :param input: An iterable of Unicode strings. 
:param encoding: An :class:`Encoding` object or a label string. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :returns: An iterable of byte strings. (tIncrementalEncoderRt_iter_encode_generator(R RR"R((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt iter_encodes ccsOx(|D] }||}|r|VqqW|ddt}|rK|VndS(NuR/(R1(R RR2R3((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyR5s   R(cBs&eZdZddZedZRS(uO “Push”-based decoder. :param fallback_encoding: An :class:`Encoding` object or a label string. The encoding to use if :obj:`input` does note have a BOM. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. ureplacecCs7t||_||_d|_d|_d|_dS(NR.(Rt_fallback_encodingt_errorst_bufferR t_decoderR(RR!R"((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyRs    cCs|j}|dk r"|||S|j|}t|\}}|dkrt|dkrs| rs||_dS|j}n|jj|jj }||_||_ |||S(uDecode one chunk of the input. :param input: A byte string. :param final: Indicate that no more input is available. Must be :obj:`True` if this is the last call. :returns: An Unicode string. iuN( R:R R9RtlenR7RtincrementaldecoderR8RR(RR R/R+R((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyR's         (RRRRtFalseR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyR(s  R4cBseZdZeddZRS(u “Push”-based encoder. :param encoding: An :class:`Encoding` object or a label string. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. .. method:: encode(input, final=False) :param input: An Unicode string. :param final: Indicate that no more input is available. Must be :obj:`True` if this is the last call. :returns: A byte string. 
ustrictcCs(t|}|jj|j|_dS(N(RRtincrementalencoderR(RRR"((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyRTs (RRRR'R(((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyR4Cs(Rt __future__RRtlabelsRtVERSIONR R RRRtobjectRR'R%R&RRRR-R)R6R5R(R4(((sE/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyt s4             3PK.e[, , webencodings/x_user_defined.pycnu[ abc@sdZddlmZddlZdejfdYZdejfdYZdejfd YZd eejfd YZd eejfd YZej dddej dej dedededeZ dZ eje ZdS(u webencodings.x_user_defined ~~~~~~~~~~~~~~~~~~~~~~~~~~~ An implementation of the x-user-defined encoding. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. i(tunicode_literalsNtCodeccBs eZddZddZRS(ustrictcCstj||tS(N(tcodecstcharmap_encodetencoding_table(tselftinputterrors((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pytencodescCstj||tS(N(Rtcharmap_decodetdecoding_table(RRR((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pytdecodes(t__name__t __module__RR (((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyRs tIncrementalEncodercBseZedZRS(cCstj||jtdS(Ni(RRRR(RRtfinal((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyRs(R R tFalseR(((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyRstIncrementalDecodercBseZedZRS(cCstj||jtdS(Ni(RR RR (RRR((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyR $s(R R RR (((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyR#st StreamWritercBseZRS((R R (((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyR(st StreamReadercBseZRS((R R (((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyR,stnameux-user-definedRR tincrementalencodertincrementaldecodert streamreadert streamwriteru  
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~(t__doc__t __future__RRRRRRRt CodecInfoRR t codec_infoR t charmap_buildR(((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyt s$      PK.e[, , webencodings/x_user_defined.pyonu[ abc@sdZddlmZddlZdejfdYZdejfdYZdejfd YZd eejfd YZd eejfd YZej dddej dej dedededeZ dZ eje ZdS(u webencodings.x_user_defined ~~~~~~~~~~~~~~~~~~~~~~~~~~~ An implementation of the x-user-defined encoding. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. i(tunicode_literalsNtCodeccBs eZddZddZRS(ustrictcCstj||tS(N(tcodecstcharmap_encodetencoding_table(tselftinputterrors((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pytencodescCstj||tS(N(Rtcharmap_decodetdecoding_table(RRR((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pytdecodes(t__name__t __module__RR (((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyRs tIncrementalEncodercBseZedZRS(cCstj||jtdS(Ni(RRRR(RRtfinal((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyRs(R R tFalseR(((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyRstIncrementalDecodercBseZedZRS(cCstj||jtdS(Ni(RR RR (RRR((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyR $s(R R RR (((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyR#st StreamWritercBseZRS((R R (((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyR(st StreamReadercBseZRS((R R (((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyR,stnameux-user-definedRR tincrementalencodertincrementaldecodert streamreadert streamwriteru  
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~(t__doc__t __future__RRRRRRRt CodecInfoRR t codec_infoR t charmap_buildR(((sK/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyt s$      PK.e[HoEwebencodings/x_user_defined.pynu[# coding: utf8 """ webencodings.x_user_defined ~~~~~~~~~~~~~~~~~~~~~~~~~~~ An implementation of the x-user-defined encoding. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ from __future__ import unicode_literals import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self, input, errors='strict'): return codecs.charmap_encode(input, errors, encoding_table) def decode(self, input, errors='strict'): return codecs.charmap_decode(input, errors, decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input, self.errors, encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input, self.errors, decoding_table)[0] class StreamWriter(Codec, codecs.StreamWriter): pass class StreamReader(Codec, codecs.StreamReader): pass ### encodings module API codec_info = codecs.CodecInfo( name='x-user-defined', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table # Python 3: # for c in range(256): print(' %r' % chr(c if c < 128 else c + 0xF700)) decoding_table = ( '\x00' '\x01' '\x02' '\x03' '\x04' '\x05' '\x06' '\x07' '\x08' '\t' '\n' '\x0b' '\x0c' '\r' '\x0e' '\x0f' '\x10' '\x11' '\x12' '\x13' '\x14' '\x15' '\x16' '\x17' '\x18' '\x19' '\x1a' '\x1b' '\x1c' '\x1d' '\x1e' '\x1f' ' ' '!' 
'"' '#' '$' '%' '&' "'" '(' ')' '*' '+' ',' '-' '.' '/' '0' '1' '2' '3' '4' '5' '6' '7' '8' '9' ':' ';' '<' '=' '>' '?' '@' 'A' 'B' 'C' 'D' 'E' 'F' 'G' 'H' 'I' 'J' 'K' 'L' 'M' 'N' 'O' 'P' 'Q' 'R' 'S' 'T' 'U' 'V' 'W' 'X' 'Y' 'Z' '[' '\\' ']' '^' '_' '`' 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 'u' 'v' 'w' 'x' 'y' 'z' '{' '|' '}' '~' '\x7f' '\uf780' '\uf781' '\uf782' '\uf783' '\uf784' '\uf785' '\uf786' '\uf787' '\uf788' '\uf789' '\uf78a' '\uf78b' '\uf78c' '\uf78d' '\uf78e' '\uf78f' '\uf790' '\uf791' '\uf792' '\uf793' '\uf794' '\uf795' '\uf796' '\uf797' '\uf798' '\uf799' '\uf79a' '\uf79b' '\uf79c' '\uf79d' '\uf79e' '\uf79f' '\uf7a0' '\uf7a1' '\uf7a2' '\uf7a3' '\uf7a4' '\uf7a5' '\uf7a6' '\uf7a7' '\uf7a8' '\uf7a9' '\uf7aa' '\uf7ab' '\uf7ac' '\uf7ad' '\uf7ae' '\uf7af' '\uf7b0' '\uf7b1' '\uf7b2' '\uf7b3' '\uf7b4' '\uf7b5' '\uf7b6' '\uf7b7' '\uf7b8' '\uf7b9' '\uf7ba' '\uf7bb' '\uf7bc' '\uf7bd' '\uf7be' '\uf7bf' '\uf7c0' '\uf7c1' '\uf7c2' '\uf7c3' '\uf7c4' '\uf7c5' '\uf7c6' '\uf7c7' '\uf7c8' '\uf7c9' '\uf7ca' '\uf7cb' '\uf7cc' '\uf7cd' '\uf7ce' '\uf7cf' '\uf7d0' '\uf7d1' '\uf7d2' '\uf7d3' '\uf7d4' '\uf7d5' '\uf7d6' '\uf7d7' '\uf7d8' '\uf7d9' '\uf7da' '\uf7db' '\uf7dc' '\uf7dd' '\uf7de' '\uf7df' '\uf7e0' '\uf7e1' '\uf7e2' '\uf7e3' '\uf7e4' '\uf7e5' '\uf7e6' '\uf7e7' '\uf7e8' '\uf7e9' '\uf7ea' '\uf7eb' '\uf7ec' '\uf7ed' '\uf7ee' '\uf7ef' '\uf7f0' '\uf7f1' '\uf7f2' '\uf7f3' '\uf7f4' '\uf7f5' '\uf7f6' '\uf7f7' '\uf7f8' '\uf7f9' '\uf7fa' '\uf7fb' '\uf7fc' '\uf7fd' '\uf7fe' '\uf7ff' ) ### Encoding table encoding_table = codecs.charmap_build(decoding_table) PK.e[cS^webencodings/tests.pycnu[ abc@sdZddlmZddlmZmZmZmZmZm Z m Z m Z m Z dZ dZdZdZd Zd Zd Zd Zd ZdS(u webencodings.tests ~~~~~~~~~~~~~~~~~~ A basic test suite for Encoding. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. 
i(tunicode_literalsi( tlookuptLABELStdecodetencodet iter_decodet iter_encodetIncrementalDecodertIncrementalEncodertUTF8cOs:y|||Wn|k r%dSXtd|dS(NuDid not raise %s.(tAssertionError(t exceptiontfunctiontargstkwargs((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyt assert_raisess  cCstdjdksttdjdks6ttdjdksQttdjdkslttdjdksttdjdksttdjdksttddksttddksttd jd ksttd jd ks#ttd jd ks>ttd jd ksYttddksqttddkstdS(Nuutf-8uUtf-8uUTF-8uutf8uutf8 u utf8 uu8uutf-8 uUS-ASCIIu windows-1252u iso-8859-1ulatin1uLATIN1ulatin-1uLATİN1(RtnameR tNone(((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyt test_labelsscCsxVtD]N}td|dt|fks4ttd|dksOtxsdddgD]b}tdg||\}}t|gkstttdg||gks_tq_Wt|}|jddkst|jddt dks tt |}|jddks4t|jddt dkstqWx5t tj D]!}t|j |ksltqlWdS(Ntuiii tfinal(RRRR RRtlistRRtTrueRtsettvaluesR(tlabeltrepeattoutputt_tdecodertencoderR((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyttest_all_labels0s ', ! %cCsptttddtttddtttgdtttgdtttdtttddS(Nséuinvalidué(Rt LookupErrorRRRRRR(((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyttest_invalid_labelCs cCsztdddtdfks'ttdtddtdfksTttdddtdfks{ttdtdtdfksttdddtdfksttd ddtdfksttd ddtd fksttd ddtd fks>ttdddtd fksettdddtd fksttdddtd fksttdddtd fksttdddtd fksttdddtd fks(ttdddtd fksOttdddtd fksvtdS(Nsulatin1u€séuutf8uéuasciiuésésuutf-16besuutf-16lesussuUTF-16BEsuUTF-16LEuUTF-16(RRR R (((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyt test_decodeLs '-'''''''''''''cCstdddksttdddks6ttdddksQttdddkslttdddksttdd d kstdS( Nuéulatin1suutf8séuutf-16suutf-16leuutf-16bes(RR (((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyt test_encodebs cCs>d}|gddks$t|dgddksBt|dgddks`t|dgddks~t|d d gddkst|d d gddkst|d gddkst|dgddkst|dddgddks t|dddgddksDt|ddddddgddksqt|dgddkst|dgddkst|dgddkst|dgddkst|ddddddgddkst|ddd gdd ks:tdS(!NcSs"t||\}}dj|S(Nu(Rtjoin(tinputtfallback_encodingRt 
_encoding((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pytiter_decode_to_stringlsulatin1uRsuéthellouhellothetllothelltoséuéséssstaua�sssuï»sssssshux-user-defineduhllo(R (R(((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyttest_iter_decodeks. !!!!**cCsfdjtgddks$tdjtdgddksKtdjtdgddksrtdjtddddgddkstdjtddddgddkstdjtddddgddkstdjtddddgd d ks2tdjtdd dd gd dksbtdS(NRulatin1uuésuutf-16suutf-16leuutf-16besuhulloux-user-definedshllo(R$RR (((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyttest_iter_encodes$''0000 cCs^d}d}d}d}t|d|tdfks?tt|d|ksZtdS(Ns2, O#ɻtϨu2, O#ttaauaaux-user-defined(RRR R(tencodedtdecoded((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyttest_x_user_defineds 'N(t__doc__t __future__RRRRRRRRRRR RRRR!R"R#R/R0R4(((sB/usr/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyt s@     PK.e[o8P)P)webencodings/__init__.pynu[# coding: utf8 """ webencodings ~~~~~~~~~~~~ This is a Python implementation of the `WHATWG Encoding standard `. See README for details. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ from __future__ import unicode_literals import codecs from .labels import LABELS VERSION = '0.5' # Some names in Encoding are not valid Python aliases. Remap these. PYTHON_NAMES = { 'iso-8859-8-i': 'iso-8859-8', 'x-mac-cyrillic': 'mac-cyrillic', 'macintosh': 'mac-roman', 'windows-874': 'cp874'} CACHE = {} def ascii_lower(string): r"""Transform (only) ASCII letters to lower case: A-Z is mapped to a-z. :param string: An Unicode string. :returns: A new Unicode string. This is used for `ASCII case-insensitive `_ matching of encoding labels. The same matching is also used, among other things, for `CSS keywords `_. 
This is different from the :meth:`~py:str.lower` method of Unicode strings which also affect non-ASCII characters, sometimes mapping them into the ASCII range: >>> keyword = u'Bac\N{KELVIN SIGN}ground' >>> assert keyword.lower() == u'background' >>> assert ascii_lower(keyword) != keyword.lower() >>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground' """ # This turns out to be faster than unicode.translate() return string.encode('utf8').lower().decode('utf8') def lookup(label): """ Look for an encoding by its label. This is the spec’s `get an encoding `_ algorithm. Supported labels are listed there. :param label: A string. :returns: An :class:`Encoding` object, or :obj:`None` for an unknown label. """ # Only strip ASCII whitespace: U+0009, U+000A, U+000C, U+000D, and U+0020. label = ascii_lower(label.strip('\t\n\f\r ')) name = LABELS.get(label) if name is None: return None encoding = CACHE.get(name) if encoding is None: if name == 'x-user-defined': from .x_user_defined import codec_info else: python_name = PYTHON_NAMES.get(name, name) # Any python_name value that gets to here should be valid. codec_info = codecs.lookup(python_name) encoding = Encoding(name, codec_info) CACHE[name] = encoding return encoding def _get_encoding(encoding_or_label): """ Accept either an encoding object or label. :param encoding: An :class:`Encoding` object or a label string. :returns: An :class:`Encoding` object. :raises: :exc:`~exceptions.LookupError` for an unknown label. """ if hasattr(encoding_or_label, 'codec_info'): return encoding_or_label encoding = lookup(encoding_or_label) if encoding is None: raise LookupError('Unknown encoding label: %r' % encoding_or_label) return encoding class Encoding(object): """Reresents a character encoding such as UTF-8, that can be used for decoding or encoding. .. attribute:: name Canonical name of the encoding .. attribute:: codec_info The actual implementation of the encoding, a stdlib :class:`~codecs.CodecInfo` object. 
See :func:`codecs.register`. """ def __init__(self, name, codec_info): self.name = name self.codec_info = codec_info def __repr__(self): return '' % self.name #: The UTF-8 encoding. Should be used for new content and formats. UTF8 = lookup('utf-8') _UTF16LE = lookup('utf-16le') _UTF16BE = lookup('utf-16be') def decode(input, fallback_encoding, errors='replace'): """ Decode a single string. :param input: A byte string :param fallback_encoding: An :class:`Encoding` object or a label string. The encoding to use if :obj:`input` does note have a BOM. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :return: A ``(output, encoding)`` tuple of an Unicode string and an :obj:`Encoding`. """ # Fail early if `encoding` is an invalid label. fallback_encoding = _get_encoding(fallback_encoding) bom_encoding, input = _detect_bom(input) encoding = bom_encoding or fallback_encoding return encoding.codec_info.decode(input, errors)[0], encoding def _detect_bom(input): """Return (bom_encoding, input), with any BOM removed from the input.""" if input.startswith(b'\xFF\xFE'): return _UTF16LE, input[2:] if input.startswith(b'\xFE\xFF'): return _UTF16BE, input[2:] if input.startswith(b'\xEF\xBB\xBF'): return UTF8, input[3:] return None, input def encode(input, encoding=UTF8, errors='strict'): """ Encode a single string. :param input: An Unicode string. :param encoding: An :class:`Encoding` object or a label string. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :return: A byte string. """ return _get_encoding(encoding).codec_info.encode(input, errors)[0] def iter_decode(input, fallback_encoding, errors='replace'): """ "Pull"-based decoder. :param input: An iterable of byte strings. 
The input is first consumed just enough to determine the encoding based on the precense of a BOM, then consumed on demand when the return value is. :param fallback_encoding: An :class:`Encoding` object or a label string. The encoding to use if :obj:`input` does note have a BOM. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :returns: An ``(output, encoding)`` tuple. :obj:`output` is an iterable of Unicode strings, :obj:`encoding` is the :obj:`Encoding` that is being used. """ decoder = IncrementalDecoder(fallback_encoding, errors) generator = _iter_decode_generator(input, decoder) encoding = next(generator) return generator, encoding def _iter_decode_generator(input, decoder): """Return a generator that first yields the :obj:`Encoding`, then yields output chukns as Unicode strings. """ decode = decoder.decode input = iter(input) for chunck in input: output = decode(chunck) if output: assert decoder.encoding is not None yield decoder.encoding yield output break else: # Input exhausted without determining the encoding output = decode(b'', final=True) assert decoder.encoding is not None yield decoder.encoding if output: yield output return for chunck in input: output = decode(chunck) if output: yield output output = decode(b'', final=True) if output: yield output def iter_encode(input, encoding=UTF8, errors='strict'): """ “Pull”-based encoder. :param input: An iterable of Unicode strings. :param encoding: An :class:`Encoding` object or a label string. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :returns: An iterable of byte strings. """ # Fail early if `encoding` is an invalid label. 
encode = IncrementalEncoder(encoding, errors).encode return _iter_encode_generator(input, encode) def _iter_encode_generator(input, encode): for chunck in input: output = encode(chunck) if output: yield output output = encode('', final=True) if output: yield output class IncrementalDecoder(object): """ “Push”-based decoder. :param fallback_encoding: An :class:`Encoding` object or a label string. The encoding to use if :obj:`input` does note have a BOM. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. """ def __init__(self, fallback_encoding, errors='replace'): # Fail early if `encoding` is an invalid label. self._fallback_encoding = _get_encoding(fallback_encoding) self._errors = errors self._buffer = b'' self._decoder = None #: The actual :class:`Encoding` that is being used, #: or :obj:`None` if that is not determined yet. #: (Ie. if there is not enough input yet to determine #: if there is a BOM.) self.encoding = None # Not known yet. def decode(self, input, final=False): """Decode one chunk of the input. :param input: A byte string. :param final: Indicate that no more input is available. Must be :obj:`True` if this is the last call. :returns: An Unicode string. """ decoder = self._decoder if decoder is not None: return decoder(input, final) input = self._buffer + input encoding, input = _detect_bom(input) if encoding is None: if len(input) < 3 and not final: # Not enough data yet. self._buffer = input return '' else: # No BOM encoding = self._fallback_encoding decoder = encoding.codec_info.incrementaldecoder(self._errors).decode self._decoder = decoder self.encoding = encoding return decoder(input, final) class IncrementalEncoder(object): """ “Push”-based encoder. :param encoding: An :class:`Encoding` object or a label string. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. .. 
method:: encode(input, final=False) :param input: An Unicode string. :param final: Indicate that no more input is available. Must be :obj:`True` if this is the last call. :returns: A byte string. """ def __init__(self, encoding=UTF8, errors='strict'): encoding = _get_encoding(encoding) self.encode = encoding.codec_info.incrementalencoder(errors).encode PK.e[Vdistlib/compat.pycnu[ abc@@sddlmZddlZddlZddlZyddlZWnek r]dZnXejddkr ddl m Z e fZ e Z ddlmZddlZddlZddlmZddlmZmZmZmZmZdd lmZmZm Z m!Z!m"Z"m#Z#m$Z$d Zddl%Z%dd l%m&Z&m'Z'm(Z(m)Z)m*Z*m+Z+m,Z,m-Z-m.Z.erdd l%m/Z/nddl0Z0ddl1Z1ddl2Z3dd l4m4Z4ddl5Z5e6Z6ddl7m8Z9ddl7m:Z;da<dZ=nddl>m Z e?fZ e?Z ddl>m@ZddlZddlZddlZddlAmZmZmZm=Z=mZm Z mZmZm$Z$ddlBm'Z'mZm&Z&m!Z!m"Z"m*Z*m+Z+m,Z,m-Z-m.Z.erdd lBm/Z/nddlCm)Z)m(Z(m#Z#ddlDjEZ0ddlBjFZ%ddlGjEZ1ddl3Z3dd lHm4Z4ddlIjJZ5eKZ6ddl7m;Z;e9Z9yddlmLZLmMZMWn<ek rdeNfdYZMddZOdZLnXyddlmPZQWn'ek r"deRfdYZQnXyddlmSZSWn*ek rcejTejUBddZSnXdd lVmWZXeYeXd!reXZWn<dd"lVmZZ[d#e[fd$YZZd%eXfd&YZWydd'l\m]Z]Wnek rd(Z]nXyddl^Z^Wn!ek r,dd)lm^Z^nXy e_Z_Wn*e`k rcdd*lambZbd+Z_nXyejcZcejdZdWnJeek rejfZgegd,krd-Zhnd.Zhd/Zcd0ZdnXydd1limjZjWnTek r1dd2lkmlZlmmZmddlZejnd3Zod4Zpd5ZjnXydd6lqmrZrWn!ek ridd6lsmrZrnXejd7 dTkre4jtZtndd9lqmtZtydd:lamuZuWnkek rdd;lamvZvydd<lwmxZyWnek rd=d>ZynXd?evfd@YZunXyddAlzm{Z{Wnek rQddBZ{nXyddClam|Z|Wnek ryddDl}m~ZWn!ek rddDlm~ZnXy ddElmZmZmZWnek rnXdFefdGYZ|nXyddHlmZmZWnek rejndIejZdJZdKefdLYZddMZdNefdOYZdPefdQYZdReRfdSYZnXdS(Ui(tabsolute_importNi(tStringIO(tFileTypei(tshutil(turlparset urlunparseturljointurlsplitt urlunsplit(t urlretrievetquotetunquotet url2pathnamet pathname2urltContentTooShortErrort splittypecC@s+t|tr!|jd}nt|S(Nsutf-8(t isinstancetunicodetencodet_quote(ts((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR s( tRequestturlopentURLErrort HTTPErrortHTTPBasicAuthHandlertHTTPPasswordMgrt HTTPHandlertHTTPRedirectHandlert build_opener(t HTTPSHandler(t HTMLParser(tifilter(t ifilterfalsecC@sYtdkr*ddl}|jdantj|}|rO|jddSd|fS(sJsplituser('user[:passwd]@host[:port]') 
--> 'user[:passwd]', 'host[:port]'.iNs ^(.*)@(.*)$ii(t _userprogtNonetretcompiletmatchtgroup(thostR$R&((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt splituser4s  (t TextIOWrapper( RRRR)R R RRR( RR RR R RRRRR(RRR(t filterfalse(tmatch_hostnametCertificateErrorR-cB@seZRS((t__name__t __module__(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR-^sc C@sSg}|stS|jd}|d|d}}|jd}||krhtdt|n|s|j|jkS|dkr|jdnY|jds|jdr|jtj |n"|jtj |j dd x$|D]}|jtj |qWtj d d j |d tj } | j|S( spMatching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 t.iit*s,too many wildcards in certificate DNS name: s[^.]+sxn--s\*s[^.]*s\As\.s\Z(tFalsetsplittcountR-treprtlowertappendt startswithR$tescapetreplaceR%tjoint IGNORECASER&( tdnthostnamet max_wildcardstpatstpartstleftmostt remaindert wildcardstfragtpat((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt_dnsname_matchbs(  " &cC@s[|stdng}|jdd }xC|D];\}}|dkr4t||r_dS|j|q4q4W|sxc|jddD]L}xC|D];\}}|dkrt||rdS|j|qqWqWnt|dkrtd|d jtt|fn;t|dkrKtd ||d fn td dS(s=Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. stempty or no certificate, match_hostname needs a SSL socket or SSL context with either CERT_OPTIONAL or CERT_REQUIREDtsubjectAltNametDNSNtsubjectt commonNameis&hostname %r doesn't match either of %ss, shostname %r doesn't match %ris=no appropriate commonName or subjectAltName fields were found((( t ValueErrortgetRGR7tlenR-R;tmapR5(tcertR>tdnsnamestsantkeytvaluetsub((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR,s.  
%(tSimpleNamespacet ContainercB@seZdZdZRS(sR A generic container for when multiple values need to be returned cK@s|jj|dS(N(t__dict__tupdate(tselftkwargs((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__init__s(R.R/t__doc__R\(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRWs(twhichc @sd}tjjr2||r.SdS|dkrYtjjdtj}n|scdS|jtj}t j dkrtj |kr|j dtj ntjjddjtj}t fd|Drg}qg|D]}|^q}n g}t}xu|D]m}tjj|} | |kr+|j| x9|D].} tjj|| } || |rc| SqcWq+q+WdS( sKGiven a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path. cS@s5tjj|o4tj||o4tjj| S(N(tostpathtexiststaccesstisdir(tfntmode((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt _access_checks$tPATHtwin32itPATHEXTtc3@s*|] }jj|jVqdS(N(R6tendswith(t.0text(tcmd(s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pys sN(R_R`tdirnameR#tenvironRMtdefpathR3tpathseptsystplatformtcurdirtinserttanytsettnormcasetaddR;( RnReR`RftpathexttfilesRmtseentdirtnormdirtthefiletname((Rns>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR^s8  !        
(tZipFilet __enter__(t ZipExtFileRcB@s#eZdZdZdZRS(cC@s|jj|jdS(N(RXRY(RZtbase((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR\scC@s|S(N((RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRscG@s|jdS(N(tclose(RZtexc_info((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__exit__s(R.R/R\RR(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs  RcB@s#eZdZdZdZRS(cC@s|S(N((RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR"scG@s|jdS(N(R(RZR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR%scO@stj|||}t|S(N(t BaseZipFiletopenR(RZtargsR[R((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR)s(R.R/RRR(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR!s  (tpython_implementationcC@s@dtjkrdStjdkr&dStjjdr<dSdS(s6Return a string identifying the Python implementation.tPyPytjavatJythont IronPythontCPython(RstversionR_RR8(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR0s(t sysconfig(tCallablecC@s t|tS(N(RR(tobj((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytcallableDstmbcststricttsurrogateescapecC@sOt|tr|St|tr2|jttStdt|jdS(Nsexpect bytes or str, not %s( Rtbytest text_typeRt _fsencodingt _fserrorst TypeErrorttypeR.(tfilename((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytfsencodeRs cC@sOt|tr|St|tr2|jttStdt|jdS(Nsexpect bytes or str, not %s( RRRtdecodeRRRRR.(R((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytfsdecode[s (tdetect_encoding(tBOM_UTF8tlookupscoding[:=]\s*([-\w.]+)cC@s^|d jjdd}|dks7|jdr;dS|d ksV|jd rZdS|S(s(Imitates get_normal_name in tokenizer.c.i t_t-sutf-8sutf-8-slatin-1s iso-8859-1s iso-latin-1slatin-1-s iso-8859-1-s iso-latin-1-(slatin-1s iso-8859-1s iso-latin-1(slatin-1-s iso-8859-1-s iso-latin-1-(R6R:R8(torig_enctenc((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt_get_normal_namels c@s yjjWntk 
r)dnXtd}d}fd}fd}|}|jtrt|d}d}n|s|gfS||}|r||gfS|}|s||gfS||}|r|||gfS|||gfS(s? The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argument, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. If the encoding cookie is an invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, 'utf-8-sig' is returned. If no encoding is specified, then the default of 'utf-8' will be returned. sutf-8c@s$y SWntk rdSXdS(NRj(t StopIteration((treadline(s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt read_or_stops  c@s7y|jd}WnDtk rYd}dk rJdj|}nt|nXtj|}|ssdSt|d}yt|}WnHt k rdkrd|}ndj|}t|nXr3|j dkr&dkrd}ndj}t|n|d 7}n|S( Nsutf-8s'invalid or missing encoding declarations {} for {!r}isunknown encoding: sunknown encoding for {!r}: {}sencoding problem: utf-8s encoding problem for {!r}: utf-8s-sig( RtUnicodeDecodeErrorR#tformatt SyntaxErrort cookie_retfindallRRt LookupErrorR(tlinet line_stringtmsgtmatchestencodingtcodec(t bom_foundR(s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt find_cookies6          is utf-8-sigN(t__self__RtAttributeErrorR#R2R8RtTrue(RRtdefaultRRtfirsttsecond((RRRs>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRws4  &         (R9ii(tunescape(tChainMap(tMutableMapping(trecursive_reprs...c@sfd}|S(sm Decorator to make a repr function return fillvalue for a recursive call c@smtfd}td|_td|_td|_tdi|_|S(Nc@sWt|tf}|kr%Sj|z|}Wdj|X|S(N(tidt get_identRztdiscard(RZRStresult(t fillvaluet repr_runningt user_function(s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytwrappers  
R/R]R.t__annotations__(RxtgetattrR/R]R.R(RR(R(RRs>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytdecorating_functions  ((RR((Rs>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt_recursive_reprsRcB@seZdZdZdZdZddZdZdZ dZ dZ e d Z ed Zd ZeZd Zed ZdZdZdZdZdZRS(s A ChainMap groups multiple dicts (or other mappings) together to create a single, updateable view. The underlying mappings are stored in a list. That list is public and can accessed or updated using the *maps* attribute. There is no other state. Lookups search the underlying mappings successively until a key is found. In contrast, writes, updates, and deletions only operate on the first mapping. cG@st|pig|_dS(sInitialize a ChainMap by setting *maps* to the given mappings. If no mappings are provided, a single empty dictionary is used. N(tlisttmaps(RZR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR\ scC@st|dS(N(tKeyError(RZRS((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __missing__scC@sAx1|jD]&}y ||SWq tk r/q Xq W|j|S(N(RRR(RZRStmapping((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __getitem__s   cC@s||kr||S|S(N((RZRSR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRMscC@sttj|jS(N(RNRxtunionR(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__len__"scC@sttj|jS(N(titerRxRR(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__iter__%sc@stfd|jDS(Nc3@s|]}|kVqdS(N((Rltm(RS(s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pys )s(RwR(RZRS((RSs>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __contains__(scC@s t|jS(N(RwR(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__bool__+scC@s%dj|djtt|jS(Ns{0.__class__.__name__}({1})s, (RR;ROR5R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__repr__.scG@s|tj||S(s?Create a ChainMap with a single dict created from the 
iterable.(tdicttfromkeys(tclstiterableR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR3scC@s$|j|jdj|jdS(sHNew ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]ii(t __class__Rtcopy(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR8scC@s|ji|jS(s;New ChainMap with a new dict followed by all previous maps.(RR(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt new_child>scC@s|j|jdS(sNew ChainMap from maps[1:].i(RR(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytparentsBscC@s||jd|/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __setitem__GscC@s?y|jd|=Wn&tk r:tdj|nXdS(Nis(Key not found in the first mapping: {!r}(RRR(RZRS((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __delitem__Js cC@s9y|jdjSWntk r4tdnXdS(sPRemove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.is#No keys found in the first mapping.N(RtpopitemR(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRPs cG@sHy|jdj||SWn&tk rCtdj|nXdS(sWRemove *key* from maps[0] and return its value. 
Raise KeyError if *key* not in maps[0].is(Key not found in the first mapping: {!r}N(RtpopRR(RZRSR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRWs cC@s|jdjdS(s'Clear maps[0], leaving maps[1:] intact.iN(Rtclear(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR^sN(R.R/R]R\RRR#RMRRRRRRt classmethodRRt__copy__RtpropertyRRRRRR(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs(               (tcache_from_sourcecC@sG|jdst|dkr*t}n|r9d}nd}||S(Ns.pytcto(RktAssertionErrorR#t __debug__(R`tdebug_overridetsuffix((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRes   (t OrderedDict(R(tKeysViewt ValuesViewt ItemsViewRcB@seZdZdZejdZejdZdZdZdZ e dZ dZ d Z d Zd Zd Zd ZdZeZeZedZddZddZdZdZeddZdZdZdZ dZ!dZ"RS(s)Dictionary that remembers insertion ordercO@st|dkr+tdt|ny |jWn7tk rog|_}||dg|(i|_nX|j||dS(sInitialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. is$expected at most 1 arguments, got %dN(RNRt_OrderedDict__rootRR#t_OrderedDict__mapt_OrderedDict__update(RZRtkwdstroot((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR\s    cC@s\||krH|j}|d}|||g|d<|d<|j| od[i]=yiiN(RR(RZRSRTt dict_setitemRtlast((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs    )cC@s@||||jj|\}}}||d<||d del od[y]iiN(RR(RZRSt dict_delitemt link_prevt link_next((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs  cc@s=|j}|d}x#||k r8|dV|d}qWdS(sod.__iter__() <==> iter(od)iiN(R(RZRtcurr((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs    cc@s=|j}|d}x#||k r8|dV|d}qWdS(s#od.__reversed__() <==> reversed(od)iiN(R(RZRR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __reversed__s    cC@smyHx|jjD] }|2qW|j}||dg|(|jjWntk r[nXtj|dS(s.od.clear() -> None. 
Remove all items from od.N(Rt itervaluesRR#RRR(RZtnodeR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs  cC@s|stdn|j}|rO|d}|d}||d<||d (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. sdictionary is emptyiii(RRRRR(RZRRtlinkRRRSRT((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs            cC@s t|S(sod.keys() -> list of keys in od(R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytkeysscC@sg|D]}||^qS(s#od.values() -> list of values in od((RZRS((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytvaluesscC@s!g|D]}|||f^qS(s.od.items() -> list of (key, value) pairs in od((RZRS((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytitemsscC@s t|S(s0od.iterkeys() -> an iterator over the keys in od(R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytiterkeysscc@sx|D]}||VqWdS(s2od.itervalues -> an iterator over the values in odN((RZtk((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs cc@s$x|D]}|||fVqWdS(s=od.iteritems -> an iterator over the (key, value) items in odN((RZR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt iteritemss cO@s&t|dkr.tdt|fn|sCtdn|d}d}t|dkrr|d}nt|trxw|D]}|||| None. Update od from dict/iterable E and F. If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v is8update() takes at most 2 positional arguments (%d given)s,update() takes at least 1 argument (0 given)iiR N((RNRRRthasattrR R (RRRZtotherRSRT((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRYs&    cC@sC||kr!||}||=|S||jkr?t|n|S(sod.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. 
(t_OrderedDict__markerR(RZRSRR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR!s  cC@s"||kr||S|||<|S(sDod.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od((RZRSR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt setdefault.s  cC@s|si}nt|tf}||kr4dSd|| repr(od)s...is%s()s%s(%r)N(Rt _get_identRR.R (RZt _repr_runningtcall_key((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR5s   cC@sg|D]}|||g^q}t|j}x'ttD]}|j|dqEW|rx|j|f|fS|j|ffS(s%Return state information for picklingN(tvarsRRRR#R(RZRR t inst_dict((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __reduce__Cs#cC@s |j|S(s!od.copy() -> a shallow copy of od(R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRMscC@s(|}x|D]}||| New ordered dictionary with keys from S and values equal to v (which defaults to None). ((RRRTtdRS((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRQs  cC@sMt|tr=t|t|ko<|j|jkStj||S(sod.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. 
(RRRNR Rt__eq__(RZR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR\s.cC@s ||k S(N((RZR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__ne__escC@s t|S(s@od.viewkeys() -> a set-like object providing a view on od's keys(R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytviewkeysjscC@s t|S(s<od.viewvalues() -> an object providing a view on od's values(R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt viewvaluesnscC@s t|S(sBod.viewitems() -> a set-like object providing a view on od's items(R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt viewitemsrsN(#R.R/R]R\RRRRRRRRR R R RRRRYRtobjectRRR#RRRRRRRRRRR (((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs:                   (tBaseConfiguratort valid_idents^[a-z_][a-z0-9_]*$cC@s,tj|}|s(td|ntS(Ns!Not a valid Python identifier: %r(t IDENTIFIERR&RLR(RR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR#|stConvertingDictcB@s#eZdZdZddZRS(s A converting dictionary wrapper.cC@sqtj||}|jj|}||k rm|||/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs    cC@sttj|||}|jj|}||k rp|||/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRMs    N(R.R/R]RR#RM(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR%s cC@sjtj|||}|jj|}||k rft|tttfkrf||_||_ qfn|S(N( RRR&R'RR%R(R)R*RS(RZRSRRTR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs   R(cB@s#eZdZdZddZRS(sA converting list wrapper.cC@sqtj||}|jj|}||k rm|||/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs    icC@s^tj||}|jj|}||k rZt|tttfkrZ||_qZn|S(N( RRR&R'RR%R(R)R*(RZtidxRTR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs  (R.R/R]RR(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR(s R)cB@seZdZdZRS(sA converting tuple wrapper.cC@sgtj||}|jj|}||k rct|tttfkrc||_||_ qcn|S(N( 
ttupleRR&R'RR%R(R)R*RS(RZRSRTR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs   (R.R/R]R(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR)sR"cB@seZdZejdZejdZejdZejdZejdZ idd6dd 6Z e e Z d Zd Zd Zd ZdZdZdZRS(sQ The configurator base class which defines some useful defaults. s%^(?P[a-z]+)://(?P.*)$s ^\s*(\w+)\s*s^\.\s*(\w+)\s*s^\[\s*(\w+)\s*\]\s*s^\d+$t ext_convertRmt cfg_converttcfgcC@st||_||j_dS(N(R%tconfigR&(RZR0((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR\sc C@s|jd}|jd}yy|j|}x_|D]W}|d|7}yt||}Wq7tk r|j|t||}q7Xq7W|SWnVtk rtjd\}}td||f}|||_ |_ |nXdS(sl Resolve strings to objects using standard import and attribute syntax. R0iisCannot resolve %r: %sN( R3RtimporterRRt ImportErrorRsRRLt __cause__t __traceback__( RZRRtusedtfoundREtettbtv((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytresolves"    cC@s |j|S(s*Default converter for the ext:// protocol.(R:(RZRT((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR-scC@sO|}|jj|}|dkr7td|n||j}|j|jd}x|rJ|jj|}|r||jd}n|jj|}|r|jd}|j j|s||}qyt |}||}Wqt k r||}qXn|r1||j}qatd||fqaW|S(s*Default converter for the cfg:// protocol.sUnable to convert %risUnable to convert %r at %rN( t WORD_PATTERNR&R#RLtendR0tgroupst DOT_PATTERNt INDEX_PATTERNt DIGIT_PATTERNtintR(RZRTtrestRRR+tn((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR.s2     cC@s/t|t r7t|tr7t|}||_nt|t rnt|trnt|}||_nt|t rt|trt|}||_nt|tr+|j j |}|r+|j }|d}|j j |d}|r(|d}t||}||}q(q+n|S(s Convert values to an appropriate type. dicts, lists and tuples are replaced by their converting alternatives. Strings are checked to see if they have a conversion format and are converted if they do. 
tprefixRN(RR%RR&R(RR)R,t string_typestCONVERT_PATTERNR&t groupdicttvalue_convertersRMR#R(RZRTRRRDt converterR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR')s*         c C@s|jd}t|s-|j|}n|jdd}tg|D]"}t|rI|||f^qI}||}|rx-|jD]\}}t|||qWn|S(s1Configure an object with a user-supplied factory.s()R0N(RRR:R#RR#R tsetattr( RZR0RtpropsRR[RRRT((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytconfigure_customEs 5 cC@s"t|trt|}n|S(s0Utility function which converts lists to tuples.(RRR,(RZRT((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytas_tupleSs(R.R/R]R$R%RFR;R>R?R@RHt staticmethodt __import__R1R\R:R-R.R'RLRM(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR"s"      "  (ii(t __future__RR_R$RstsslR2R#t version_infoRt basestringRERRttypesRt file_typet __builtin__tbuiltinst ConfigParsert configparsert _backportRRRRRRturllibR R RR R R RRturllib2RRRRRRRRRRthttplibt xmlrpclibtQueuetqueueRthtmlentitydefst raw_inputt itertoolsR tfilterR!R+R"R)tiotstrR*t urllib.parseturllib.requestt urllib.errort http.clienttclienttrequestt xmlrpc.clientt html.parsert html.entitiestentitiestinputR,R-RLRGRVRWR!R^tF_OKtX_OKtzipfileRRRRtBaseZipExtFileRtRRRt NameErrort collectionsRRRRtgetfilesystemencodingRRttokenizeRtcodecsRRR%RRthtmlR9tcgiRRRtreprlibRRtimpRRtthreadRRt dummy_threadt_abcollRRRRtlogging.configR"R#tIR$R%RRR(R,R)(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyts$        (4  @         @F   2 +  A                   [   b          PK.e[bޑ66distlib/resources.pyonu[ abc@s ddlmZddlZddlZddlZddlZddlZddlZddlZddl Z ddl Z ddl m Z ddl mZmZmZmZejeZdadefdYZdefd YZd efd YZd efd YZdefdYZdefdYZieed6ee j6Z yQyddl!Z"Wne#k rddl$Z"nXee e"j%R R((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pytfinds  cCst|jdS(Nurb(RR (RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR'scCs)t|jd}|jSWdQXdS(Nurb(RR tread(RRR 
((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR+scCstjj|jS(N(RR tgetsize(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR,scsDfd}tgtj|jD]}||r%|^q%S(Ncs|dko|jj S(Nu __pycache__(tendswithtskipped_extensions(R (R(sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pytalloweds (tsetRtlistdirR (RRRIR ((RsA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR2scCs|j|jS(N(RCR (RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR0sccs|j|}|dk r|g}x|r|jd}|V|jr'|j}xe|jD]W}|sr|}ndj||g}|j|}|jr|j|q]|Vq]Wq'q'WndS(Niu/(RDRtpopR0R%R3R tappend(RR>RttodotrnameR%tnew_nametchild((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pytiterators        (u.pycu.pyou.class(u.pycu.pyo(R"R#R.tsystplatformt startswithRHR R9RARBRRDR'R+R,R2R0t staticmethodRR RRCRR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR4ws"          tZipResourceFindercBs_eZdZdZdZdZdZdZdZdZ dZ d Z RS( u6 Resource finder for resources in .zip files. 
cCstt|j||jj}dt||_t|jdrY|jj|_nt j ||_t |j|_ dS(Niu_files( R RWR R7tarchivetlent prefix_lenthasattrt_filest zipimportt_zip_directory_cachetsortedtindex(RR5RX((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR s cCs|S(N((RR ((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR9scCs||j}||jkr%t}nr|rN|dtjkrN|tj}ntj|j|}y|j|j|}Wntk rt }nX|st j d||j j nt j d||j j |S(Niu_find failed: %r %ru_find worked: %r %r(RZR\RRR?tbisectR`RUt IndexErrorR/tloggertdebugR7R(RR Rti((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyRBs    cCs-|jj}|jdt|}||fS(Ni(R7RXR RY(RRRR ((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyRs cCs|jj|jS(N(R7tget_dataR (RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR+scCstj|j|S(N(tiotBytesIOR+(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR'scCs|j|j}|j|dS(Ni(R RZR\(RRR ((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR,scCs|j|j}|r9|dtjkr9|tj7}nt|}t}tj|j|}xn|t|jkr|j|j|sPn|j||}|j |j tjdd|d7}qfW|S(Niii( R RZRR?RYRJRaR`RUtaddR<(RRR tplenRRets((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR2s   cCs||j}|r6|dtjkr6|tj7}ntj|j|}y|j|j|}Wntk r~t}nX|S(Ni(RZRR?RaR`RURbR/(RR ReR((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyRCs   ( R"R#R.R R9RBRR+R'R,R2RC(((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyRWs       cCs|tt|sJ         ",!ZM       PK.e[.distlib/metadata.pyonu[ abc@sdZddlmZddlZddlmZddlZddlZddlZddl m Z m Z ddl m Z mZmZddlmZdd lmZmZdd lmZmZejeZd e fd YZd e fdYZde fdYZde fdYZdddgZdZ dZ!ej"dZ#ej"dZ$ddddddd d!d"d#d$f Z%ddddd%ddd d!d"d#d$d&d'd(d)d*fZ&d(d)d*d&d'fZ'ddddd%ddd d!d"d#d+d,d$d&d'd-d.d/d0d1d2fZ(d/d0d1d-d2d+d,d.fZ)ddddd%ddd d!d"d#d+d,d$d&d'd-d.d/d0d1d2d3d4d5d6d7fZ*d3d7d4d5d6fZ+e,Z-e-j.e%e-j.e&e-j.e(e-j.e*ej"d8Z/d9Z0d:Z1idd;6dd<6dd=6dd>6d%d?6dd@6ddA6d 
dB6d!dC6d"dD6d#dE6d+dF6d,dG6d$dH6d&dI6d'dJ6d-dK6d/dL6d0dM6d5dN6d1dO6d2dP6d*dQ6d)dR6d(dS6d.dT6d3dU6d4dV6d6dW6d7dX6Z2d0d-d/fZ3d1fZ4dfZ5dd&d(d*d)d-d/d0d2d.d%d5d7d6fZ6d.fZ7d fZ8d"d+ddfZ9e:Z;ej"dYZ<e=dZZ>d[e:fd\YZ?d]Z@d^ZAd_e:fd`YZBdS(auImplementation of the Metadata for Python packages PEPs. Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental). i(tunicode_literalsN(tmessage_from_filei(tDistlibExceptiont __version__(tStringIOt string_typest text_type(t interpret(textract_by_keyt get_extras(t get_schemetPEP440_VERSION_REtMetadataMissingErrorcBseZdZRS(uA required metadata is missing(t__name__t __module__t__doc__(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR stMetadataConflictErrorcBseZdZRS(u>Attempt to read or write metadata fields that are conflictual.(R RR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR st MetadataUnrecognizedVersionErrorcBseZdZRS(u Unknown metadata version number.(R RR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR$stMetadataInvalidErrorcBseZdZRS(uA metadata value is invalid(R RR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR(suMetadatauPKG_INFO_ENCODINGuPKG_INFO_PREFERRED_VERSIONuutf-8u1.1u \|u uMetadata-VersionuNameuVersionuPlatformuSummaryu DescriptionuKeywordsu Home-pageuAuthoru Author-emailuLicenseuSupported-Platformu Classifieru Download-URLu ObsoletesuProvidesuRequiresu MaintaineruMaintainer-emailuObsoletes-Distu Project-URLu Provides-Distu Requires-DistuRequires-PythonuRequires-ExternaluPrivate-Versionu Obsoleted-ByuSetup-Requires-Distu ExtensionuProvides-Extrau"extra\s*==\s*("([^"]+)"|'([^']+)')cCsP|dkrtS|dkr tS|dkr0tS|dkr@tSt|dS(Nu1.0u1.1u1.2u2.0(t _241_FIELDSt _314_FIELDSt _345_FIELDSt _426_FIELDSR(tversion((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt_version2fieldlistgs    c Csd}g}xB|jD]4\}}|gdd fkrCqn|j|qWddddg}x|D]}|tkrd|kr|jdn|tkrd|kr|jdn|tkrd|kr|jdn|tkrmd|krm|jdqmqmWt|dkr1|dSt|dkrRt d nd|koj||t }d|ko||t 
}d|ko||t }t |t |t |dkrt d n| r| r| rt|krtSn|r dS|rdSdS( u5Detect the best version depending on the fields used.cSs%x|D]}||krtSqWtS(N(tTruetFalse(tkeystmarkerstmarker((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt _has_markerus  uUNKNOWNu1.0u1.1u1.2u2.0iiuUnknown metadata setu(You used incompatible 1.1/1.2/2.0 fieldsN(titemstNonetappendRtremoveRRRtlenRt _314_MARKERSt _345_MARKERSt _426_MARKERStinttPKG_INFO_PREFERRED_VERSION( tfieldsRRtkeytvaluetpossible_versionstis_1_1tis_1_2tis_2_0((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt _best_versionssB  & umetadata_versionunameuversionuplatformusupported_platformusummaryu descriptionukeywordsu home_pageuauthoru author_emailu maintainerumaintainer_emailulicenseu classifieru download_urluobsoletes_distu provides_distu requires_distusetup_requires_disturequires_pythonurequires_externalurequiresuprovidesu obsoletesu project_urluprivate_versionu obsoleted_byu extensionuprovides_extrau[^A-Za-z0-9.]+cCsG|r9tjd|}tjd|jdd}nd||fS(uhReturn the distribution name with version. If for_filename is true, return a filename-escaped form.u-u u.u%s-%s(t _FILESAFEtsubtreplace(tnameRt for_filename((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt_get_name_and_versions!tLegacyMetadatacBs4eZdZdddddZdZdZdZdZdZ dZ d Z d Z d Z d Zed ZdZdZdZdZedZedZddZdZedZedZedZdZdZdZdZ dZ!dZ"RS( uaThe legacy metadata of a release. Supports versions 1.0, 1.1 and 1.2 (auto-detected). 
You can instantiate the class with one of these arguments (or none): - *path*, the path to a metadata file - *fileobj* give a file-like object with metadata as content - *mapping* is a dict-like object - *scheme* is a version scheme name udefaultcCs|||gjddkr-tdni|_g|_d|_||_|dk rm|j|nB|dk r|j|n&|dk r|j ||j ndS(Niu'path, fileobj and mapping are exclusive( tcountR t TypeErrort_fieldstrequires_filest _dependenciestschemetreadt read_filetupdatetset_metadata_version(tselftpathtfileobjtmappingR=((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt__init__s        cCst|j|jdJscCst|}|d|jdtj|ddd}z|j||Wd|jXdS(u&Write the metadata fields to filepath.uwRbuutf-8N(RcRdt write_fileRe(RBRft skip_unknownRg((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyRGhscCs<|jx+t|dD]}|j|}|rT|dgdgfkrTqn|tkr|j||dj|qn|tkr|dkr|jd kr|jdd}q|jdd }n|g}n|t krg|D]}dj|^q}nx!|D]}|j|||qWqWd S( u0Write the PKG-INFO format data to a file object.uMetadata-VersionuUNKNOWNu,u Descriptionu1.0u1.1u u u |N(u1.0u1.1( RARRIRVRHtjoinRURXR3Ri(RBt fileobjectRqRnRoR+((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyRpps$      % c sfd}|sn^t|drRxL|jD]}||||q4Wn$x!|D]\}}|||qYW|rx*|jD]\}}|||qWndS(uSet metadata values from the given iterable `other` and kwargs. Behavior is like `dict.update`: If `other` has a ``keys`` method, they are looped over and ``self[key]`` is assigned ``other[key]``. Else, ``other`` is an iterable of ``(key, value)`` iterables. Keys that don't match a metadata field or that have an empty value are dropped. 
cs2|tkr.|r.jj||ndS(N(RTRKRM(R*R+(RB(s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt_setsukeysN(thasattrRR(RBtothertkwargsRttktv((RBs@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR@s cCs|j|}|tks'|dkrt|ttf rt|trwg|jdD]}|j^q\}qg}nF|tkrt|ttf rt|tr|g}qg}nt j t j r|d}t |j}|tkrR|d k rRx|D];}|j|jddst jd|||qqWq|tkr|d k r|j|st jd|||qq|tkr|d k r|j|st jd|||qqn|tkr|dkr|j|}qn||j|d?d@f }i}x;|D]3\}}| sf||jkrD|||||D]3\}}| sk||jkrI||||(t __class__R R4R(RB((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt__repr__msN(#R RRR RFRARHRJRLRPRQRMRWR[R]RR_R`RaR>R?RGRpR@RKRRIRRRRRRoRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR7s>                     ,  , ;    u pydist.jsonu metadata.jsontMetadatacBseZdZejdZejdejZeZ ejdZ dZ de Z id>d6d?d6d@d 6Zd Zd ZiedAfd 6edBfd6e dCfd6e dDfd 6ZdEZdFdFdFddZedGZdFefZdFefZi defd6defd6ed6ed6ed6defd6ed6ed6ed6ed 6d!efd"6dHd$6dId 6Z[[d&ZdFd'Zd(Zed)Z ed*Z!e!j"d+Z!dFdFd,Z#ed-Z$ed.Z%e%j"d/Z%d0Z&d1Z'd2Z(d3Z)id4d6d5d6d6d6d7d 6d8d96d!d"6Z*d:Z+dFdFe,e-d;Z.d<Z/d=Z0RS(Ju The metadata of a release. This implementation uses 2.0 (JSON) metadata where possible. If not possible, it wraps a LegacyMetadata instance which handles the key-value metadata format. 
u ^\d+(\.\d+)*$u!^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$u .{1,2047}u2.0u distlib (%s)unameuversionulegacyusummaryuqname version license summary description author author_email keywords platform home_page classifiers download_urluwextras run_requires test_requires build_requires dev_requires provides meta_requires obsoleted_by supports_environmentsumetadata_versionu_legacyu_datauschemeudefaultcCs|||gjddkr-tdnd|_d|_||_|dk ry|j||||_Wqtk rtd|d||_|j qXnd}|rt |d}|j }WdQXn|r|j }n|dkri|j d6|j d6|_nt|ts?|jd}ny)tj||_|j|j|Wn9tk rtd t|d||_|j nXdS( Niu'path, fileobj and mapping are exclusiveRER=urbumetadata_versionu generatoruutf-8RD(R8R R9t_legacyt_dataR=t_validate_mappingRR7tvalidateRdR>tMETADATA_VERSIONt GENERATORRzRtdecodetjsontloadst ValueErrorR(RBRCRDRER=Rtf((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyRFs>          ulicenseukeywordsu Requires-Distu run_requiresuSetup-Requires-Distubuild_requiresu dev_requiresu test_requiresu meta_requiresuProvides-Extrauextrasumodulesu namespacesuexportsucommandsu Classifieru classifiersu Download-URLu source_urluMetadata-Versionc Cstj|d}tj|d}||kr||\}}|jr|dkrs|dkrgdn|}q|jj|}q|dkrdn|}|d kr|jj||}qt}|}|jjd} | r|dkr| jd |}q|dkrH| jd } | r| j||}qq| jd } | sr|jjd } n| r| j||}qn||kr|}qnQ||krtj||}n0|jr|jj|}n|jj|}|S( Nu common_keysu mapped_keysucommandsuexportsumodulesu namespacesu classifiersu extensionsupython.commandsupython.detailsupython.exports(ucommandsuexportsumodulesu namespacesu classifiers(tobjectt__getattribute__RR RIR( RBR*tcommontmappedtlktmakertresultR+tsentineltd((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyRsF           cCso||jkrk|j|\}}|p.|j|krk|j|}|shtd||fqhqkndS(Nu.'%s' is an invalid value for the '%s' property(tSYNTAX_VALIDATORSR=tmatchR(RBR*R+R=tpatternt exclusionstm((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt_validate_valuescCs|j||tj|d}tj|d}||kr||\}}|jr~|dkrntn||j|               
cCst|j|jtS(N(R6R4RR(RB((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pytname_and_version@scCsd|jr|jd}n|jjdg}d|j|jf}||kr`|j|n|S(Nu Provides-Distuprovidesu%s (%s)(RRRR4RR!(RBRts((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pytprovidesDs  cCs*|jr||jd}||kr3|dkrZd }n|}||||d|kr>|}Pq>q>W|dkri|d6}|jd|n*t|dt|B}t||d(R4RRR RX(RBR4R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR(s (((ulegacy((ulegacy(ulegacy(ulegacy(u_legacyu_datauschemeN(unameuversionulicenseukeywordsusummary(u Download-URLN(uMetadata-VersionN(1R RRtretcompiletMETADATA_VERSION_MATCHERtIt NAME_MATCHERR tVERSION_MATCHERtSUMMARY_MATCHERRRRRRRRt __slots__R RFRKt common_keysR{t none_listtdictt none_dictt mapped_keysRRRtpropertyRRtsetterRRRRRRRRRRRRGRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyRvs       ,         + ' *     % (CRt __future__RRctemailRRRRtRRtcompatRRRRRtutilRR RR R t getLoggerR R}R RRRt__all__tPKG_INFO_ENCODINGR(RRZRYRRR$RR%RR&RKRRR@tEXTRA_RERR0RTRRRRURiRVRRRR1RR6R7tMETADATA_FILENAMEtWHEEL_METADATA_FILENAMER(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt s                                         8            PK.e[:Yndistlib/database.pycnu[ abc@s0dZddlmZddlZddlZddlZddlZddlZddlZddl Z ddl Z ddl Z ddl m Z mZddlmZddlmZmZddlmZmZmZdd lmZmZmZmZmZmZmZd d d d dgZ ej!e"Z#dZ$dZ%deddde$dfZ&dZ'de(fdYZ)de(fdYZ*de(fdYZ+de+fdYZ,de,fd YZ-d!e,fd"YZ.e-Z/e.Z0d#e(fd$YZ1d%d&Z2d'Z3d(Z4d)Z5dS(*uPEP 376 implementation.i(tunicode_literalsNi(tDistlibExceptiont resources(tStringIO(t get_schemetUnsupportedVersionError(tMetadatatMETADATA_FILENAMEtWHEEL_METADATA_FILENAME(tparse_requirementtcached_propertytparse_name_and_versiont read_exportst write_exportst CSVReadert CSVWriteru DistributionuBaseInstalledDistributionuInstalledDistributionuEggInfoDistributionuDistributionPathupydist-exports.jsonupydist-commands.jsonu INSTALLERuRECORDu REQUESTEDu RESOURCESuSHAREDu .dist-infot_CachecBs)eZdZdZdZdZRS(uL A simple cache mapping 
names and .dist-info paths to distributions cCsi|_i|_t|_dS(uZ Initialise an instance. There is normally one for each DistributionPath. N(tnametpathtFalset generated(tself((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt__init__0s  cCs'|jj|jjt|_dS(uC Clear the cache, setting it to its initial state. N(RtclearRRR(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR8s  cCsH|j|jkrD||j|j<|jj|jgj|ndS(u` Add a distribution to the cache. :param dist: The distribution to add. N(RRt setdefaulttkeytappend(Rtdist((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytadd@s(t__name__t __module__t__doc__RRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR,s  tDistributionPathcBseZdZd edZdZdZeeeZ dZ dZ dZ e dZdZd Zd d Zd Zd d ZRS(uU Represents a set of distributions installed on a path (typically sys.path). cCsg|dkrtj}n||_t|_||_t|_t|_t|_ t d|_ dS(u Create an instance from a path, optionally including legacy (distutils/ setuptools/distribute) distributions. :param path: The path to use, as a list of directories. If not specified, sys.path is used. :param include_egg: If True, this instance will look for and return legacy distributions as well as those based on PEP 376. udefaultN( tNonetsysRtTruet _include_distt _include_eggRt_cachet _cache_eggt_cache_enabledRt_scheme(RRt include_egg((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRNs        cCs|jS(N(R((R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_get_cache_enabledbscCs ||_dS(N(R((Rtvalue((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_set_cache_enabledescCs|jj|jjdS(u, Clears the internal cache. 
N(R&RR'(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt clear_cachejs c cst}x|jD]}tj|}|dkr:qn|jd}| s|j r`qnt|j}x^|D]V}|j|}| sv|j|krqvn|jr}|j t r}t t g}x<|D]1}t j||} |j| } | rPqqWqvtj| j} td| dd} WdQXtjd|j|j|jt|jd| d|Vqv|jrv|j d rvtjd|j|j|jt|j|VqvqvWqWdS( uD Yield .dist-info and/or .egg(-info) distributions. utfileobjtschemeulegacyNuFound %stmetadatatenvu .egg-infou.egg(u .egg-infou.egg(tsetRRtfinder_for_pathR!tfindt is_containertsortedR$tendswitht DISTINFO_EXTRRt posixpathtjoint contextlibtclosingt as_streamRtloggertdebugRtnew_dist_classR%told_dist_class( RtseenRtfindertrtrsettentrytpossible_filenamestmetadata_filenamet metadata_pathtpydisttstreamR1((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_yield_distributionsrs@       cCs|jj }|jo |jj }|s/|rxF|jD]8}t|trd|jj|q<|jj|q<W|rt|j_n|rt|j_qndS(uk Scan the path for distributions and populate the cache with those that are found. N( R&RR%R'RMt isinstancetInstalledDistributionRR#(Rtgen_disttgen_eggR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_generate_caches  cCs)|jdd}dj||gtS(uo The *name* and *version* parameters are converted into their filename-escaped form, i.e. any ``'-'`` characters are replaced with ``'_'`` other than the one in ``'dist-info'`` and the one separating the name from the version number. :parameter name: is converted to a standard distribution name by replacing any runs of non- alphanumeric characters with a single ``'-'``. :type name: string :parameter version: is converted to a standard version string. Spaces become dots, and all other non-alphanumeric characters (except dots) become dashes, with runs of multiple dashes condensed to a single dash. 
:type version: string :returns: directory name :rtype: stringu-u_(treplaceR;R9(tclsRtversion((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytdistinfo_dirnamesccs|js(xv|jD] }|VqWnZ|jx|jjjD] }|VqEW|jrx"|jjjD] }|VqpWndS(u5 Provides an iterator that looks for distributions and returns :class:`InstalledDistribution` or :class:`EggInfoDistribution` instances for each one of them. :rtype: iterator of :class:`InstalledDistribution` and :class:`EggInfoDistribution` instances N(R(RMRRR&RtvaluesR%R'(RR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytget_distributionss     cCsd}|j}|jsNx|jD]}|j|kr(|}Pq(q(Wne|j||jjkr|jj|d}n2|jr||j jkr|j j|d}n|S(u= Looks for a named distribution on the path. This function only returns the first result found, as no more than one value is expected. If nothing is found, ``None`` is returned. :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` or ``None`` iN( R!tlowerR(RMRRRR&RR%R'(RRtresultR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytget_distributions     c csd}|dk r_y |jjd||f}Wq_tk r[td||fq_Xnx|jD]z}|j}xh|D]`}t|\}}|dkr||kr|VPqq||kr|j|r|VPqqWqlWdS(u Iterates over all distributions to find which distributions provide *name*. If a *version* is provided, it will be used to filter the results. This function only returns the first result found, since no more than one values are expected. If the directory is not found, returns ``None``. :parameter version: a version specifier that indicates the version required, conforming to the format in ``PEP-345`` :type name: string :type version: string u%s (%s)uinvalid name or version: %r, %rN( R!R)tmatchert ValueErrorRRXtprovidesR tmatch( RRRUR\Rtprovidedtptp_nametp_ver((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytprovides_distributions$       cCs;|j|}|dkr.td|n|j|S(u5 Return the path to a resource file. 
uno distribution named %r foundN(R[R!t LookupErrortget_resource_path(RRt relative_pathR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt get_file_paths ccsxy|jD]k}|j}||kr ||}|dk rY||kru||Vquqxx|jD] }|VqfWq q WdS(u Return all of the exported entries in a particular category. :param category: The category to search for entries. :param name: If specified, only entries with that name are returned. N(RXtexportsR!RW(RtcategoryRRREtdtv((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytget_exported_entries"s     N(RRRR!RRR+R-tpropertyt cache_enabledR.RMRRt classmethodRVRXR[RdRhRm(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR Js    *    $ t DistributioncBseZdZeZeZdZedZeZ edZ edZ dZ edZ edZedZed Zed Zd Zd Zd ZdZRS(u A base class for distributions, whether installed or from indexes. Either way, it must have some metadata, so that's all that's needed for construction. cCsp||_|j|_|jj|_|j|_d|_d|_d|_d|_ t |_ i|_ dS(u Initialise an instance. :param metadata: The instance of :class:`Metadata` describing this distribution. N( R1RRYRRUR!tlocatortdigesttextrastcontextR3t download_urlstdigests(RR1((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRGs        cCs |jjS(uH The source archive download URL for this distribution. (R1t source_url(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRxXscCsd|j|jfS(uX A utility property which displays the name and version in parentheses. u%s (%s)(RRU(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytname_and_versionascCsB|jj}d|j|jf}||kr>|j|n|S(u A set of distribution names and versions provided by this distribution. :return: A set of "name (version)" strings. 
u%s (%s)(R1R^RRUR(Rtplistts((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR^hs   cCsS|j}tjd|jt||}t|j|d|jd|jS(Nu%Getting requirements from metadata %rRtR2( R1R?R@ttodicttgetattrR3tget_requirementsRtRu(Rtreq_attrtmdtreqts((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_get_requirementsts  cCs |jdS(Nu run_requires(R(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt run_requires{scCs |jdS(Nu meta_requires(R(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt meta_requiresscCs |jdS(Nubuild_requires(R(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytbuild_requiresscCs |jdS(Nu test_requires(R(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt test_requiresscCs |jdS(Nu dev_requires(R(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt dev_requiressc Cst|}t|jj}y|j|j}Wn@tk rvtjd||j d}|j|}nX|j }t }x]|j D]R}t |\}} ||krqny|j| }PWqtk rqXqW|S(u Say if this instance matches (fulfills) a requirement. :param req: The requirement to match. :rtype req: str :return: True if it matches, else False. u+could not read version %r - using name onlyi(R RR1R0R\t requirementRR?twarningtsplitRRR^R R_( RtreqRER0R\RRZRaRbRc((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytmatches_requirements*      cCs6|jrd|j}nd}d|j|j|fS(uC Return a textual representation of this instance, u [%s]uu(RxRRU(Rtsuffix((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt__repr__s cCs[t|t|k r!t}n6|j|jkoT|j|jkoT|j|jk}|S(u< See if this distribution is the same as another. :param other: The distribution to compare with. To be equal to one another. distributions must have the same type, name, version and source_url. :return: True if it is the same, else False. (ttypeRRRURx(RtotherRZ((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt__eq__s  cCs't|jt|jt|jS(uH Compute hash in a way which matches the equality test. 
(thashRRURx(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt__hash__s(RRRRtbuild_time_dependencyt requestedRRnRxt download_urlRyR^RRRRRRRRRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRq5s$   " tBaseInstalledDistributioncBs,eZdZdZddZddZRS(u] This is the base class for installed distributions (whether PEP 376 or legacy). cCs,tt|j|||_||_dS(u Initialise an instance. :param metadata: An instance of :class:`Metadata` which describes the distribution. This will normally have been initialised from a metadata file in the ``path``. :param path: The path of the ``.dist-info`` or ``.egg-info`` directory for the distribution. :param env: This is normally the :class:`DistributionPath` instance where this distribution was found. N(tsuperRRRt dist_path(RR1RR2((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRs  cCs|dkr|j}n|dkr6tj}d}ntt|}d|j}||j}tj|jdj d}d||fS(u Get the hash of some data, using a particular hash algorithm, if specified. :param data: The data to be hashed. :type data: bytes :param hasher: The name of a hash implementation, supported by hashlib, or ``None``. Examples of valid values are ``'sha1'``, ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and ``'sha512'``. If no hasher is specified, the ``hasher`` attribute of the :class:`InstalledDistribution` instance is used. If the hasher is determined to be ``None``, MD5 is used as the hashing algorithm. :returns: The hash of the data. If a hasher was explicitly specified, the returned hash will be prefixed with the specified hasher followed by '='. 
:rtype: str uu%s=t=uasciiu%s%sN( R!thasherthashlibtmd5R}Rstbase64turlsafe_b64encodetrstriptdecode(RtdataRtprefixRs((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytget_hashs      !N(RRRR!RRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRs ROcBseZdZdZdddZdZdZdZe dZ dZ dZ d Z d Zed Zd Ze d ZedZdZdZdZdZejZRS(u  Created with the *path* of the ``.dist-info`` directory provided to the constructor. It reads the metadata contained in ``pydist.json`` when it is instantiated., or uses a passed in Metadata instance (useful for when dry-run mode is being used). usha256c Cstj||_}|dkr;ddl}|jn|rr|jrr||jjkrr|jj|j }n|dkr$|j t }|dkr|j t }n|dkr|j d}n|dkrt dt |fntj|j}td|dd}WdQXntt|j||||rb|jrb|jj|ny|j d}Wn'tk rddl}|jnX|dk |_dS(NiuMETADATAuno %s found in %sR/R0ulegacyu REQUESTED(RR4RDR!tpdbt set_traceR(R&RR1R5RRR]R<R=R>RRRORRtAttributeErrorR(RRR1R2RDRRERL((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRs4  !       cCsd|j|j|jfS(Nu#(RRUR(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR2scCsd|j|jfS(Nu%s %s(RRU(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt__str__6sc Csg}|jd}tj|j}td|i}x_|D]W}gtt|dD] }d^qb}||\}} } |j|| | fqFWWdQXWdQX|S(u" Get the list of installed files for the distribution :return: A list of tuples of path, hash and size. Note that hash and size might be ``None`` for some entries. The path is exactly as stored in the file (which is as in PEP 376). uRECORDRLiN( tget_distinfo_resourceR<R=R>RtrangetlenR!R( RtresultsRERLt record_readertrowtitmissingRtchecksumtsize((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt _get_records9s (&cCs.i}|jt}|r*|j}n|S(u Return the information exported by this distribution. :return: A dictionary of exports, mapping an export category to a dict of :class:`ExportEntry` instances describing the individual export entries, and keyed by name. 
(RtEXPORTS_FILENAMER (RRZRE((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRiPs cCsLi}|jt}|rHtj|j}t|}WdQXn|S(u Read exports data from a file in .ini format. :return: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. N(RRR<R=R>R (RRZRERL((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR ^s cCs8|jt}t|d}t||WdQXdS(u Write a dictionary of exports to a file in .ini format. :param exports: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. uwN(tget_distinfo_fileRtopenR (RRitrftf((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR msc Cs|jd}tj|jF}td|.}x$|D]\}}||kr@|Sq@WWdQXWdQXtd|dS(uW NOTE: This API may change in the future. Return the absolute path to a resource file with the given relative path. :param relative_path: The path, relative to .dist-info, of the resource of interest. :return: The absolute path where the resource is to be found. u RESOURCESRLNu3no resource file with relative path %r is installed(RR<R=R>RtKeyError(RRgRERLtresources_readertrelativet destination((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRfxs  ccs x|jD] }|Vq WdS(u Iterates over the ``RECORD`` entries and returns a tuple ``(path, hash, size)`` for each line. :returns: iterator of (path, hash, size) N(R(RRZ((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytlist_installed_filessc Cstjj|d}tjj|j}|j|}tjj|d}|jd}tjd||rwdSt |}x|D]}tjj |s|j d rd} } nCdtjj |} t |d} |j| j} WdQX|j|s(|r@|j|r@tjj||}n|j|| | fqW|j|rtjj||}n|j|ddfWdQX|S( u Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any existing ``RECORD`` file is silently overwritten. prefix is used to determine when to write absolute paths. 
uuRECORDu creating %su.pycu.pyou%durbN(u.pycu.pyo(tosRR;tdirnamet startswithRR?tinfoR!RtisdirR8tgetsizeRRtreadtrelpathtwriterow( RtpathsRtdry_runtbasetbase_under_prefixt record_pathtwriterRt hash_valueRtfp((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytwrite_installed_filess. ! c Csg}tjj|j}|jd}xn|jD]`\}}}tjj|sptjj||}n||krq7ntjj|s|j|dt t fq7tjj |r7t tjj |}|r||kr|j|d||fq|rd|kr3|jddd}nd }t|dG} |j| j|} | |kr|j|d|| fnWd QXqq7q7W|S( u Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value. uRECORDuexistsusizeu=iiurbuhashN(RRRRRtisabsR;texistsRR#RtisfiletstrRRR!RRR( Rt mismatchesRRRRRt actual_sizeRRt actual_hash((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytcheck_installed_filess.    ,cCsi}tjj|jd}tjj|rtj|ddd}|jj}WdQXx[|D]P}|jdd\}}|dkr|j |gj |qj|||su%s (%s)( RtstripRR?RR Rtt constraintsRRR;(RtreqsRRREtcons((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytparse_requires_dataos&       csRg}y4tj|dd}|j}WdQXWntk rMnX|S(uCreate a list of dependencies from a requires.txt file. *req_path*: the path to a setuptools-produced requires.txt file. 
uruutf-8N(RRRtIOError(treq_pathRR(R(s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytparse_requires_paths u.egguEGG-INFOuPKG-INFORR0ulegacyu requires.txtuEGG-INFO/PKG-INFOuutf8R/uEGG-INFO/requires.txtuutf-8u .egg-infou,path must end with .egg-info or .egg, got %r(R!R8RRRR;Rt zipimportt zipimporterRtget_dataRRRtadd_requirements( RRtrequiresRt meta_pathR1RtzipfR/R((Rs@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRls:     cCsd|j|j|jfS(Nu!(RRUR(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRscCsd|j|jfS(Nu%s %s(RRU(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRscCsg}tjj|jd}tjj|rx`|jD]O\}}}||kr^q=ntjj|s=|j|dttfq=q=Wn|S(u Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value. uinstalled-files.txtuexists(RRR;RRRR#R(RRRRt_((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRs  #c Cs2d}d}tjj|jd}g}tjj|r.tj|ddd}x|D]}|j}tjjtjj|j|}tjj|stj d||j d rqdqntjj |sd|j |||||fqdqdWWd QX|j |d d fn|S( u Iterates over the ``installed-files.txt`` entries and returns a tuple ``(path, hash, size)`` for each line. 
:returns: a list of (path, hash, size) cSs@t|d}z|j}Wd|jXtj|jS(Nurb(RRtcloseRRt hexdigest(RRtcontent((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_md5s  cSstj|jS(N(Rtstattst_size(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_sizesuinstalled-files.txturRuutf-8uNon-existent file: %su.pycu.pyoN(u.pycu.pyo(RRR;RRRRtnormpathR?RR8RRR!(RRRRRZRRRa((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRs"    $ /c cstjj|jd}t}tj|ddd}x|D]}|j}|dkrjt}q@n|s@tjjtjj|j|}|j |jr|r|Vq|Vqq@q@WWdQXdS(u  Iterates over the ``installed-files.txt`` entries and returns paths for each line if the path is pointing to a file located in the ``.egg-info`` directory or one of its subdirectories. :parameter absolute: If *absolute* is ``True``, each returned path is transformed into a local absolute path. Otherwise the raw value from ``installed-files.txt`` is returned. :type absolute: boolean :returns: iterator of paths uinstalled-files.txturRuutf-8u./N( RRR;R#RRRRRR(RtabsoluteRtskipRRRa((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRs    $cCst|to|j|jkS(N(RNRR(RR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRsN(RRRR#RRR!RRRRRRRRRRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRNs  K    &  tDependencyGraphcBsheZdZdZdZd dZdZdZddZ e dZ d Z d Z RS( u Represents a dependency graph between distributions. The dependency relationships are stored in an ``adjacency_list`` that maps distributions to a list of ``(other, label)`` tuples where ``other`` is a distribution and the edge is labeled with ``label`` (i.e. the version specifier, if such was provided). Also, for more efficient traversal, for every distribution ``x``, a list of predecessors is kept in ``reverse_list[x]``. An edge from distribution ``a`` to distribution ``b`` means that ``a`` depends on ``b``. 
If any missing dependencies are found, they are stored in ``missing``, which is a dictionary that maps distributions to a list of requirements that were not provided by any other distributions. cCsi|_i|_i|_dS(N(tadjacency_listt reverse_listR(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR.s  cCsg|j| "%s" [label="%s"] u "%s" -> "%s" usubgraph disconnected { ulabel = "Disconnected" ubgcolor = red u"%s"u u} N(RRtitemsRRR!R(RRtskip_disconnectedt disconnectedRtadjsRR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytto_dotgs&    %    cCs=g}i}x(|jjD]\}}|||t|jD])\}}|sZ|j|||=qZqZW|sPnxO|jD]A\}}g|D]$\}}||kr||f^q||sL         4  7F 6  PK.e[8-]R]Rdistlib/index.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2013 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # import hashlib import logging import os import shutil import subprocess import tempfile try: from threading import Thread except ImportError: from dummy_threading import Thread from . import DistlibException from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr, urlparse, build_opener, string_types) from .util import cached_property, zip_dir, ServerProxy logger = logging.getLogger(__name__) DEFAULT_INDEX = 'https://pypi.python.org/pypi' DEFAULT_REALM = 'pypi' class PackageIndex(object): """ This class represents a package index compatible with PyPI, the Python Package Index. """ boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$' def __init__(self, url=None): """ Initialise an instance. :param url: The URL of the index. If not specified, the URL for PyPI is used. 
""" self.url = url or DEFAULT_INDEX self.read_configuration() scheme, netloc, path, params, query, frag = urlparse(self.url) if params or query or frag or scheme not in ('http', 'https'): raise DistlibException('invalid repository: %s' % self.url) self.password_handler = None self.ssl_verifier = None self.gpg = None self.gpg_home = None self.rpc_proxy = None with open(os.devnull, 'w') as sink: # Use gpg by default rather than gpg2, as gpg2 insists on # prompting for passwords for s in ('gpg', 'gpg2'): try: rc = subprocess.check_call([s, '--version'], stdout=sink, stderr=sink) if rc == 0: self.gpg = s break except OSError: pass def _get_pypirc_command(self): """ Get the distutils command for interacting with PyPI configurations. :return: the command. """ from distutils.core import Distribution from distutils.config import PyPIRCCommand d = Distribution() return PyPIRCCommand(d) def read_configuration(self): """ Read the PyPI access configuration as supported by distutils, getting PyPI to do the actual work. This populates ``username``, ``password``, ``realm`` and ``url`` attributes from the configuration. """ # get distutils to do the work c = self._get_pypirc_command() c.repository = self.url cfg = c._read_pypirc() self.username = cfg.get('username') self.password = cfg.get('password') self.realm = cfg.get('realm', 'pypi') self.url = cfg.get('repository', self.url) def save_configuration(self): """ Save the PyPI access configuration. You must have set ``username`` and ``password`` attributes before calling this method. Again, distutils is used to do the actual work. """ self.check_credentials() # get distutils to do the work c = self._get_pypirc_command() c._store_pypirc(self.username, self.password) def check_credentials(self): """ Check that ``username`` and ``password`` have been set, and raise an exception if not. 
""" if self.username is None or self.password is None: raise DistlibException('username and password must be set') pm = HTTPPasswordMgr() _, netloc, _, _, _, _ = urlparse(self.url) pm.add_password(self.realm, netloc, self.username, self.password) self.password_handler = HTTPBasicAuthHandler(pm) def register(self, metadata): """ Register a distribution on PyPI, using the provided metadata. :param metadata: A :class:`Metadata` instance defining at least a name and version number for the distribution to be registered. :return: The HTTP response received from PyPI upon submission of the request. """ self.check_credentials() metadata.validate() d = metadata.todict() d[':action'] = 'verify' request = self.encode_request(d.items(), []) response = self.send_request(request) d[':action'] = 'submit' request = self.encode_request(d.items(), []) return self.send_request(request) def _reader(self, name, stream, outbuf): """ Thread runner for reading lines of from a subprocess into a buffer. :param name: The logical name of the stream (used for logging only). :param stream: The stream to read from. This will typically a pipe connected to the output stream of a subprocess. :param outbuf: The list to append the read lines to. """ while True: s = stream.readline() if not s: break s = s.decode('utf-8').rstrip() outbuf.append(s) logger.debug('%s: %s' % (name, s)) stream.close() def get_sign_command(self, filename, signer, sign_password, keystore=None): """ Return a suitable command for signing a file. :param filename: The pathname to the file to be signed. :param signer: The identifier of the signer of the file. :param sign_password: The passphrase for the signer's private key used for signing. :param keystore: The path to a directory which contains the keys used in verification. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: The signing command as a list suitable to be passed to :class:`subprocess.Popen`. 
""" cmd = [self.gpg, '--status-fd', '2', '--no-tty'] if keystore is None: keystore = self.gpg_home if keystore: cmd.extend(['--homedir', keystore]) if sign_password is not None: cmd.extend(['--batch', '--passphrase-fd', '0']) td = tempfile.mkdtemp() sf = os.path.join(td, os.path.basename(filename) + '.asc') cmd.extend(['--detach-sign', '--armor', '--local-user', signer, '--output', sf, filename]) logger.debug('invoking: %s', ' '.join(cmd)) return cmd, sf def run_command(self, cmd, input_data=None): """ Run a command in a child process , passing it any input data specified. :param cmd: The command to run. :param input_data: If specified, this must be a byte string containing data to be sent to the child process. :return: A tuple consisting of the subprocess' exit code, a list of lines read from the subprocess' ``stdout``, and a list of lines read from the subprocess' ``stderr``. """ kwargs = { 'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE, } if input_data is not None: kwargs['stdin'] = subprocess.PIPE stdout = [] stderr = [] p = subprocess.Popen(cmd, **kwargs) # We don't use communicate() here because we may need to # get clever with interacting with the command t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout)) t1.start() t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr)) t2.start() if input_data is not None: p.stdin.write(input_data) p.stdin.close() p.wait() t1.join() t2.join() return p.returncode, stdout, stderr def sign_file(self, filename, signer, sign_password, keystore=None): """ Sign a file. :param filename: The pathname to the file to be signed. :param signer: The identifier of the signer of the file. :param sign_password: The passphrase for the signer's private key used for signing. :param keystore: The path to a directory which contains the keys used in signing. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: The absolute pathname of the file where the signature is stored. 
""" cmd, sig_file = self.get_sign_command(filename, signer, sign_password, keystore) rc, stdout, stderr = self.run_command(cmd, sign_password.encode('utf-8')) if rc != 0: raise DistlibException('sign command failed with error ' 'code %s' % rc) return sig_file def upload_file(self, metadata, filename, signer=None, sign_password=None, filetype='sdist', pyversion='source', keystore=None): """ Upload a release file to the index. :param metadata: A :class:`Metadata` instance defining at least a name and version number for the file to be uploaded. :param filename: The pathname of the file to be uploaded. :param signer: The identifier of the signer of the file. :param sign_password: The passphrase for the signer's private key used for signing. :param filetype: The type of the file being uploaded. This is the distutils command which produced that file, e.g. ``sdist`` or ``bdist_wheel``. :param pyversion: The version of Python which the release relates to. For code compatible with any Python, this would be ``source``, otherwise it would be e.g. ``3.2``. :param keystore: The path to a directory which contains the keys used in signing. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: The HTTP response received from PyPI upon submission of the request. 
""" self.check_credentials() if not os.path.exists(filename): raise DistlibException('not found: %s' % filename) metadata.validate() d = metadata.todict() sig_file = None if signer: if not self.gpg: logger.warning('no signing program available - not signed') else: sig_file = self.sign_file(filename, signer, sign_password, keystore) with open(filename, 'rb') as f: file_data = f.read() md5_digest = hashlib.md5(file_data).hexdigest() sha256_digest = hashlib.sha256(file_data).hexdigest() d.update({ ':action': 'file_upload', 'protocol_version': '1', 'filetype': filetype, 'pyversion': pyversion, 'md5_digest': md5_digest, 'sha256_digest': sha256_digest, }) files = [('content', os.path.basename(filename), file_data)] if sig_file: with open(sig_file, 'rb') as f: sig_data = f.read() files.append(('gpg_signature', os.path.basename(sig_file), sig_data)) shutil.rmtree(os.path.dirname(sig_file)) request = self.encode_request(d.items(), files) return self.send_request(request) def upload_documentation(self, metadata, doc_dir): """ Upload documentation to the index. :param metadata: A :class:`Metadata` instance defining at least a name and version number for the documentation to be uploaded. :param doc_dir: The pathname of the directory which contains the documentation. This should be the directory that contains the ``index.html`` for the documentation. :return: The HTTP response received from PyPI upon submission of the request. 
""" self.check_credentials() if not os.path.isdir(doc_dir): raise DistlibException('not a directory: %r' % doc_dir) fn = os.path.join(doc_dir, 'index.html') if not os.path.exists(fn): raise DistlibException('not found: %r' % fn) metadata.validate() name, version = metadata.name, metadata.version zip_data = zip_dir(doc_dir).getvalue() fields = [(':action', 'doc_upload'), ('name', name), ('version', version)] files = [('content', name, zip_data)] request = self.encode_request(fields, files) return self.send_request(request) def get_verify_command(self, signature_filename, data_filename, keystore=None): """ Return a suitable command for verifying a file. :param signature_filename: The pathname to the file containing the signature. :param data_filename: The pathname to the file containing the signed data. :param keystore: The path to a directory which contains the keys used in verification. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: The verifying command as a list suitable to be passed to :class:`subprocess.Popen`. """ cmd = [self.gpg, '--status-fd', '2', '--no-tty'] if keystore is None: keystore = self.gpg_home if keystore: cmd.extend(['--homedir', keystore]) cmd.extend(['--verify', signature_filename, data_filename]) logger.debug('invoking: %s', ' '.join(cmd)) return cmd def verify_signature(self, signature_filename, data_filename, keystore=None): """ Verify a signature for a file. :param signature_filename: The pathname to the file containing the signature. :param data_filename: The pathname to the file containing the signed data. :param keystore: The path to a directory which contains the keys used in verification. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: True if the signature was verified, else False. 
""" if not self.gpg: raise DistlibException('verification unavailable because gpg ' 'unavailable') cmd = self.get_verify_command(signature_filename, data_filename, keystore) rc, stdout, stderr = self.run_command(cmd) if rc not in (0, 1): raise DistlibException('verify command failed with error ' 'code %s' % rc) return rc == 0 def download_file(self, url, destfile, digest=None, reporthook=None): """ This is a convenience method for downloading a file from an URL. Normally, this will be a file from the index, though currently no check is made for this (i.e. a file can be downloaded from anywhere). The method is just like the :func:`urlretrieve` function in the standard library, except that it allows digest computation to be done during download and checking that the downloaded data matched any expected value. :param url: The URL of the file to be downloaded (assumed to be available via an HTTP GET request). :param destfile: The pathname where the downloaded file is to be saved. :param digest: If specified, this must be a (hasher, value) tuple, where hasher is the algorithm used (e.g. ``'md5'``) and ``value`` is the expected value. :param reporthook: The same as for :func:`urlretrieve` in the standard library. """ if digest is None: digester = None logger.debug('No digest specified') else: if isinstance(digest, (list, tuple)): hasher, digest = digest else: hasher = 'md5' digester = getattr(hashlib, hasher)() logger.debug('Digest specified: %s' % digest) # The following code is equivalent to urlretrieve. # We need to do it this way so that we can compute the # digest of the file as we go. 
with open(destfile, 'wb') as dfp: # addinfourl is not a context manager on 2.x # so we have to use try/finally sfp = self.send_request(Request(url)) try: headers = sfp.info() blocksize = 8192 size = -1 read = 0 blocknum = 0 if "content-length" in headers: size = int(headers["Content-Length"]) if reporthook: reporthook(blocknum, blocksize, size) while True: block = sfp.read(blocksize) if not block: break read += len(block) dfp.write(block) if digester: digester.update(block) blocknum += 1 if reporthook: reporthook(blocknum, blocksize, size) finally: sfp.close() # check that we got the whole file, if we can if size >= 0 and read < size: raise DistlibException( 'retrieval incomplete: got only %d out of %d bytes' % (read, size)) # if we have a digest, it must match. if digester: actual = digester.hexdigest() if digest != actual: raise DistlibException('%s digest mismatch for %s: expected ' '%s, got %s' % (hasher, destfile, digest, actual)) logger.debug('Digest verified: %s', digest) def send_request(self, req): """ Send a standard library :class:`Request` to PyPI and return its response. :param req: The request to send. :return: The HTTP response from PyPI (a standard library HTTPResponse). """ handlers = [] if self.password_handler: handlers.append(self.password_handler) if self.ssl_verifier: handlers.append(self.ssl_verifier) opener = build_opener(*handlers) return opener.open(req) def encode_request(self, fields, files): """ Encode fields and files for posting to an HTTP server. :param fields: The fields to send as a list of (fieldname, value) tuples. :param files: The files to send as a list of (fieldname, filename, file_bytes) tuple. 
""" # Adapted from packaging, which in turn was adapted from # http://code.activestate.com/recipes/146306 parts = [] boundary = self.boundary for k, values in fields: if not isinstance(values, (list, tuple)): values = [values] for v in values: parts.extend(( b'--' + boundary, ('Content-Disposition: form-data; name="%s"' % k).encode('utf-8'), b'', v.encode('utf-8'))) for key, filename, value in files: parts.extend(( b'--' + boundary, ('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)).encode('utf-8'), b'', value)) parts.extend((b'--' + boundary + b'--', b'')) body = b'\r\n'.join(parts) ct = b'multipart/form-data; boundary=' + boundary headers = { 'Content-type': ct, 'Content-length': str(len(body)) } return Request(self.url, body, headers) def search(self, terms, operator=None): if isinstance(terms, string_types): terms = {'name': terms} if self.rpc_proxy is None: self.rpc_proxy = ServerProxy(self.url, timeout=3.0) return self.rpc_proxy.search(terms, operator or 'and') PK.e[V\\distlib/version.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2012-2016 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """ Implementation of a flexible versioning scheme providing support for PEP-440, setuptools-compatible and semantic versioning. 
""" import logging import re from .compat import string_types __all__ = ['NormalizedVersion', 'NormalizedMatcher', 'LegacyVersion', 'LegacyMatcher', 'SemanticVersion', 'SemanticMatcher', 'UnsupportedVersionError', 'get_scheme'] logger = logging.getLogger(__name__) class UnsupportedVersionError(ValueError): """This is an unsupported version.""" pass class Version(object): def __init__(self, s): self._string = s = s.strip() self._parts = parts = self.parse(s) assert isinstance(parts, tuple) assert len(parts) > 0 def parse(self, s): raise NotImplementedError('please implement in a subclass') def _check_compatible(self, other): if type(self) != type(other): raise TypeError('cannot compare %r and %r' % (self, other)) def __eq__(self, other): self._check_compatible(other) return self._parts == other._parts def __ne__(self, other): return not self.__eq__(other) def __lt__(self, other): self._check_compatible(other) return self._parts < other._parts def __gt__(self, other): return not (self.__lt__(other) or self.__eq__(other)) def __le__(self, other): return self.__lt__(other) or self.__eq__(other) def __ge__(self, other): return self.__gt__(other) or self.__eq__(other) # See http://docs.python.org/reference/datamodel#object.__hash__ def __hash__(self): return hash(self._parts) def __repr__(self): return "%s('%s')" % (self.__class__.__name__, self._string) def __str__(self): return self._string @property def is_prerelease(self): raise NotImplementedError('Please implement in subclasses.') class Matcher(object): version_class = None dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?") comp_re = re.compile(r'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$') num_re = re.compile(r'^\d+(\.\d+)*$') # value is either a callable or the name of a method _operators = { '<': lambda v, c, p: v < c, '>': lambda v, c, p: v > c, '<=': lambda v, c, p: v == c or v < c, '>=': lambda v, c, p: v == c or v > c, '==': lambda v, c, p: v == c, '===': lambda v, c, p: v == c, # by default, compatible => >=. 
'~=': lambda v, c, p: v == c or v > c, '!=': lambda v, c, p: v != c, } def __init__(self, s): if self.version_class is None: raise ValueError('Please specify a version class') self._string = s = s.strip() m = self.dist_re.match(s) if not m: raise ValueError('Not valid: %r' % s) groups = m.groups('') self.name = groups[0].strip() self.key = self.name.lower() # for case-insensitive comparisons clist = [] if groups[2]: constraints = [c.strip() for c in groups[2].split(',')] for c in constraints: m = self.comp_re.match(c) if not m: raise ValueError('Invalid %r in %r' % (c, s)) groups = m.groups() op = groups[0] or '~=' s = groups[1] if s.endswith('.*'): if op not in ('==', '!='): raise ValueError('\'.*\' not allowed for ' '%r constraints' % op) # Could be a partial version (e.g. for '2.*') which # won't parse as a version, so keep it as a string vn, prefix = s[:-2], True if not self.num_re.match(vn): # Just to check that vn is a valid version self.version_class(vn) else: # Should parse as a version, so we can create an # instance for the comparison vn, prefix = self.version_class(s), False clist.append((op, vn, prefix)) self._parts = tuple(clist) def match(self, version): """ Check if the provided version matches the constraints. :param version: The version to match against this instance. :type version: String or :class:`Version` instance. 
""" if isinstance(version, string_types): version = self.version_class(version) for operator, constraint, prefix in self._parts: f = self._operators.get(operator) if isinstance(f, string_types): f = getattr(self, f) if not f: msg = ('%r not implemented ' 'for %s' % (operator, self.__class__.__name__)) raise NotImplementedError(msg) if not f(version, constraint, prefix): return False return True @property def exact_version(self): result = None if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='): result = self._parts[0][1] return result def _check_compatible(self, other): if type(self) != type(other) or self.name != other.name: raise TypeError('cannot compare %s and %s' % (self, other)) def __eq__(self, other): self._check_compatible(other) return self.key == other.key and self._parts == other._parts def __ne__(self, other): return not self.__eq__(other) # See http://docs.python.org/reference/datamodel#object.__hash__ def __hash__(self): return hash(self.key) + hash(self._parts) def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self._string) def __str__(self): return self._string PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?' r'(\.(post)(\d+))?(\.(dev)(\d+))?' 
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$') def _pep_440_key(s): s = s.strip() m = PEP440_VERSION_RE.match(s) if not m: raise UnsupportedVersionError('Not a valid version: %s' % s) groups = m.groups() nums = tuple(int(v) for v in groups[1].split('.')) while len(nums) > 1 and nums[-1] == 0: nums = nums[:-1] if not groups[0]: epoch = 0 else: epoch = int(groups[0]) pre = groups[4:6] post = groups[7:9] dev = groups[10:12] local = groups[13] if pre == (None, None): pre = () else: pre = pre[0], int(pre[1]) if post == (None, None): post = () else: post = post[0], int(post[1]) if dev == (None, None): dev = () else: dev = dev[0], int(dev[1]) if local is None: local = () else: parts = [] for part in local.split('.'): # to ensure that numeric compares as > lexicographic, avoid # comparing them directly, but encode a tuple which ensures # correct sorting if part.isdigit(): part = (1, int(part)) else: part = (0, part) parts.append(part) local = tuple(parts) if not pre: # either before pre-release, or final release and after if not post and dev: # before pre-release pre = ('a', -1) # to sort before a0 else: pre = ('z',) # to sort after all pre-releases # now look at the state of post and dev. if not post: post = ('_',) # sort before 'a' if not dev: dev = ('final',) #print('%s -> %s' % (s, m.groups())) return epoch, nums, pre, post, dev, local _normalized_key = _pep_440_key class NormalizedVersion(Version): """A rational version. Good: 1.2 # equivalent to "1.2.0" 1.2.0 1.2a1 1.2.3a2 1.2.3b1 1.2.3c1 1.2.3.4 TODO: fill this out Bad: 1 # minimum two numbers 1.2a # release level must have a release serial 1.2.3b """ def parse(self, s): result = _normalized_key(s) # _normalized_key loses trailing zeroes in the release # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0 # However, PEP 440 prefix matching needs it: for example, # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0). 
m = PEP440_VERSION_RE.match(s) # must succeed groups = m.groups() self._release_clause = tuple(int(v) for v in groups[1].split('.')) return result PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev']) @property def is_prerelease(self): return any(t[0] in self.PREREL_TAGS for t in self._parts if t) def _match_prefix(x, y): x = str(x) y = str(y) if x == y: return True if not x.startswith(y): return False n = len(y) return x[n] == '.' class NormalizedMatcher(Matcher): version_class = NormalizedVersion # value is either a callable or the name of a method _operators = { '~=': '_match_compatible', '<': '_match_lt', '>': '_match_gt', '<=': '_match_le', '>=': '_match_ge', '==': '_match_eq', '===': '_match_arbitrary', '!=': '_match_ne', } def _adjust_local(self, version, constraint, prefix): if prefix: strip_local = '+' not in constraint and version._parts[-1] else: # both constraint and version are # NormalizedVersion instances. # If constraint does not have a local component, # ensure the version doesn't, either. 
strip_local = not constraint._parts[-1] and version._parts[-1] if strip_local: s = version._string.split('+', 1)[0] version = self.version_class(s) return version, constraint def _match_lt(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if version >= constraint: return False release_clause = constraint._release_clause pfx = '.'.join([str(i) for i in release_clause]) return not _match_prefix(version, pfx) def _match_gt(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if version <= constraint: return False release_clause = constraint._release_clause pfx = '.'.join([str(i) for i in release_clause]) return not _match_prefix(version, pfx) def _match_le(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) return version <= constraint def _match_ge(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) return version >= constraint def _match_eq(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if not prefix: result = (version == constraint) else: result = _match_prefix(version, constraint) return result def _match_arbitrary(self, version, constraint, prefix): return str(version) == str(constraint) def _match_ne(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if not prefix: result = (version != constraint) else: result = not _match_prefix(version, constraint) return result def _match_compatible(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if version == constraint: return True if version < constraint: return False # if not prefix: # return True release_clause = constraint._release_clause if len(release_clause) > 1: release_clause = release_clause[:-1] pfx = '.'.join([str(i) for i in 
release_clause]) return _match_prefix(version, pfx) _REPLACEMENTS = ( (re.compile('[.+-]$'), ''), # remove trailing puncts (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start (re.compile('^[.-]'), ''), # remove leading puncts (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion) (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading v(ersion) (re.compile('[.]{2,}'), '.'), # multiple runs of '.' (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha (re.compile(r'\b(pre-alpha|prealpha)\b'), 'pre.alpha'), # standardise (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses ) _SUFFIX_REPLACEMENTS = ( (re.compile('^[:~._+-]+'), ''), # remove leading puncts (re.compile('[,*")([\]]'), ''), # remove unwanted chars (re.compile('[~:+_ -]'), '.'), # replace illegal chars (re.compile('[.]{2,}'), '.'), # multiple runs of '.' (re.compile(r'\.$'), ''), # trailing '.' ) _NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)') def _suggest_semantic_version(s): """ Try to suggest a semantic form for a version for which _suggest_normalized_version couldn't come up with anything. """ result = s.strip().lower() for pat, repl in _REPLACEMENTS: result = pat.sub(repl, result) if not result: result = '0.0.0' # Now look for numeric prefix, and separate it out from # the rest. #import pdb; pdb.set_trace() m = _NUMERIC_PREFIX.match(result) if not m: prefix = '0.0.0' suffix = result else: prefix = m.groups()[0].split('.') prefix = [int(i) for i in prefix] while len(prefix) < 3: prefix.append(0) if len(prefix) == 3: suffix = result[m.end():] else: suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():] prefix = prefix[:3] prefix = '.'.join([str(i) for i in prefix]) suffix = suffix.strip() if suffix: #import pdb; pdb.set_trace() # massage the suffix. 
for pat, repl in _SUFFIX_REPLACEMENTS: suffix = pat.sub(repl, suffix) if not suffix: result = prefix else: sep = '-' if 'dev' in suffix else '+' result = prefix + sep + suffix if not is_semver(result): result = None return result def _suggest_normalized_version(s): """Suggest a normalized version close to the given version string. If you have a version string that isn't rational (i.e. NormalizedVersion doesn't like it) then you might be able to get an equivalent (or close) rational version from this function. This does a number of simple normalizations to the given string, based on observation of versions currently in use on PyPI. Given a dump of those version during PyCon 2009, 4287 of them: - 2312 (53.93%) match NormalizedVersion without change with the automatic suggestion - 3474 (81.04%) match when using this suggestion method @param s {str} An irrational version string. @returns A rational version string, or None, if couldn't determine one. """ try: _normalized_key(s) return s # already rational except UnsupportedVersionError: pass rs = s.lower() # part of this could use maketrans for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'), ('beta', 'b'), ('rc', 'c'), ('-final', ''), ('-pre', 'c'), ('-release', ''), ('.release', ''), ('-stable', ''), ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''), ('final', '')): rs = rs.replace(orig, repl) # if something ends with dev or pre, we add a 0 rs = re.sub(r"pre$", r"pre0", rs) rs = re.sub(r"dev$", r"dev0", rs) # if we have something like "b-2" or "a.2" at the end of the # version, that is probably beta, alpha, etc # let's remove the dash or dot rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs) # 1.0-dev-r371 -> 1.0.dev371 # 0.1-dev-r79 -> 0.1.dev79 rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs) # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1 rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs) # Clean: v0.3, v1.0 if rs.startswith('v'): rs = rs[1:] # Clean leading '0's on numbers. 
#TODO: unintended side-effect on, e.g., "2003.05.09" # PyPI stats: 77 (~2%) better rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs) # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers # zero. # PyPI stats: 245 (7.56%) better rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs) # the 'dev-rNNN' tag is a dev tag rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs) # clean the - when used as a pre delimiter rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs) # a terminal "dev" or "devel" can be changed into ".dev0" rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs) # a terminal "dev" can be changed into ".dev0" rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs) # a terminal "final" or "stable" can be removed rs = re.sub(r"(final|stable)$", "", rs) # The 'r' and the '-' tags are post release tags # 0.4a1.r10 -> 0.4a1.post10 # 0.9.33-17222 -> 0.9.33.post17222 # 0.9.33-r17222 -> 0.9.33.post17222 rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs) # Clean 'r' instead of 'dev' usage: # 0.9.33+r17222 -> 0.9.33.dev17222 # 1.0dev123 -> 1.0.dev123 # 1.0.git123 -> 1.0.dev123 # 1.0.bzr123 -> 1.0.dev123 # 0.1a0dev.123 -> 0.1a0.dev123 # PyPI stats: ~150 (~4%) better rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs) # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage: # 0.2.pre1 -> 0.2c1 # 0.2-c1 -> 0.2c1 # 1.0preview123 -> 1.0c123 # PyPI stats: ~21 (0.62%) better rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs) # Tcl/Tk uses "px" for their post release markers rs = re.sub(r"p(\d+)$", r".post\1", rs) try: _normalized_key(rs) except UnsupportedVersionError: rs = None return rs # # Legacy version processing (distribute-compatible) # _VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I) _VERSION_REPLACE = { 'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@', '': None, '.': None, } def _legacy_key(s): def get_parts(s): result = [] for p in _VERSION_PART.split(s.lower()): p = _VERSION_REPLACE.get(p, p) if p: if '0' <= p[:1] <= '9': p = p.zfill(8) 
else: p = '*' + p result.append(p) result.append('*final') return result result = [] for p in get_parts(s): if p.startswith('*'): if p < '*final': while result and result[-1] == '*final-': result.pop() while result and result[-1] == '00000000': result.pop() result.append(p) return tuple(result) class LegacyVersion(Version): def parse(self, s): return _legacy_key(s) @property def is_prerelease(self): result = False for x in self._parts: if (isinstance(x, string_types) and x.startswith('*') and x < '*final'): result = True break return result class LegacyMatcher(Matcher): version_class = LegacyVersion _operators = dict(Matcher._operators) _operators['~='] = '_match_compatible' numeric_re = re.compile('^(\d+(\.\d+)*)') def _match_compatible(self, version, constraint, prefix): if version < constraint: return False m = self.numeric_re.match(str(constraint)) if not m: logger.warning('Cannot compute compatible match for version %s ' ' and constraint %s', version, constraint) return True s = m.groups()[0] if '.' in s: s = s.rsplit('.', 1)[0] return _match_prefix(version, s) # # Semantic versioning # _SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)' r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?' 
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I) def is_semver(s): return _SEMVER_RE.match(s) def _semantic_key(s): def make_tuple(s, absent): if s is None: result = (absent,) else: parts = s[1:].split('.') # We can't compare ints and strings on Python 3, so fudge it # by zero-filling numeric values so simulate a numeric comparison result = tuple([p.zfill(8) if p.isdigit() else p for p in parts]) return result m = is_semver(s) if not m: raise UnsupportedVersionError(s) groups = m.groups() major, minor, patch = [int(i) for i in groups[:3]] # choose the '|' and '*' so that versions sort correctly pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*') return (major, minor, patch), pre, build class SemanticVersion(Version): def parse(self, s): return _semantic_key(s) @property def is_prerelease(self): return self._parts[1][0] != '|' class SemanticMatcher(Matcher): version_class = SemanticVersion class VersionScheme(object): def __init__(self, key, matcher, suggester=None): self.key = key self.matcher = matcher self.suggester = suggester def is_valid_version(self, s): try: self.matcher.version_class(s) result = True except UnsupportedVersionError: result = False return result def is_valid_matcher(self, s): try: self.matcher(s) result = True except UnsupportedVersionError: result = False return result def is_valid_constraint_list(self, s): """ Used for processing some metadata fields """ return self.is_valid_matcher('dummy_name (%s)' % s) def suggest(self, s): if self.suggester is None: result = None else: result = self.suggester(s) return result _SCHEMES = { 'normalized': VersionScheme(_normalized_key, NormalizedMatcher, _suggest_normalized_version), 'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s), 'semantic': VersionScheme(_semantic_key, SemanticMatcher, _suggest_semantic_version), } _SCHEMES['default'] = _SCHEMES['normalized'] def get_scheme(name): if name not in _SCHEMES: raise ValueError('unknown scheme name: %r' % name) return 
_SCHEMES[name] PK.e[7%distlib/util.pynu[# # Copyright (C) 2012-2016 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # import codecs from collections import deque import contextlib import csv from glob import iglob as std_iglob import io import json import logging import os import py_compile import re import shutil import socket try: import ssl except ImportError: # pragma: no cover ssl = None import subprocess import sys import tarfile import tempfile import textwrap try: import threading except ImportError: # pragma: no cover import dummy_threading as threading import time from . import DistlibException from .compat import (string_types, text_type, shutil, raw_input, StringIO, cache_from_source, urlopen, urljoin, httplib, xmlrpclib, splittype, HTTPHandler, BaseConfigurator, valid_ident, Container, configparser, URLError, ZipFile, fsdecode, unquote) logger = logging.getLogger(__name__) # # Requirement parsing code for name + optional constraints + optional extras # # e.g. 'foo >= 1.2, < 2.0 [bar, baz]' # # The regex can seem a bit hairy, so we build it up out of smaller pieces # which are manageable. # COMMA = r'\s*,\s*' COMMA_RE = re.compile(COMMA) IDENT = r'(\w|[.-])+' EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')' VERSPEC = IDENT + r'\*?' 
RELOP = '([<>=!~]=)|[<>]' # # The first relop is optional - if absent, will be taken as '~=' # BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' + RELOP + r')\s*(' + VERSPEC + '))*') DIRECT_REF = '(from\s+(?P.*))' # # Either the bare constraints or the bare constraints in parentheses # CONSTRAINTS = (r'\(\s*(?P' + BARE_CONSTRAINTS + '|' + DIRECT_REF + r')\s*\)|(?P' + BARE_CONSTRAINTS + '\s*)') EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*' EXTRAS = r'\[\s*(?P' + EXTRA_LIST + r')?\s*\]' REQUIREMENT = ('(?P' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' + CONSTRAINTS + ')?$') REQUIREMENT_RE = re.compile(REQUIREMENT) # # Used to scan through the constraints # RELOP_IDENT = '(?P' + RELOP + r')\s*(?P' + VERSPEC + ')' RELOP_IDENT_RE = re.compile(RELOP_IDENT) def parse_requirement(s): def get_constraint(m): d = m.groupdict() return d['op'], d['vn'] result = None m = REQUIREMENT_RE.match(s) if m: d = m.groupdict() name = d['dn'] cons = d['c1'] or d['c2'] if not d['diref']: url = None else: # direct reference cons = None url = d['diref'].strip() if not cons: cons = None constr = '' rs = d['dn'] else: if cons[0] not in '<>!=': cons = '~=' + cons iterator = RELOP_IDENT_RE.finditer(cons) cons = [get_constraint(m) for m in iterator] rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons])) if not d['ex']: extras = None else: extras = COMMA_RE.split(d['ex']) result = Container(name=name, constraints=cons, extras=extras, requirement=rs, source=s, url=url) return result def get_resources_dests(resources_root, rules): """Find destinations for resources files""" def get_rel_path(base, path): # normalizes and returns a lstripped-/-separated path base = base.replace(os.path.sep, '/') path = path.replace(os.path.sep, '/') assert path.startswith(base) return path[len(base):].lstrip('/') destinations = {} for base, suffix, dest in rules: prefix = os.path.join(resources_root, base) for abs_base in iglob(prefix): abs_glob = 
os.path.join(abs_base, suffix) for abs_path in iglob(abs_glob): resource_file = get_rel_path(resources_root, abs_path) if dest is None: # remove the entry if it was here destinations.pop(resource_file, None) else: rel_path = get_rel_path(abs_base, abs_path) rel_dest = dest.replace(os.path.sep, '/').rstrip('/') destinations[resource_file] = rel_dest + '/' + rel_path return destinations def in_venv(): if hasattr(sys, 'real_prefix'): # virtualenv venvs result = True else: # PEP 405 venvs result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix) return result def get_executable(): # The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as # changes to the stub launcher mean that sys.executable always points # to the stub on macOS # if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__' # in os.environ): # result = os.environ['__PYVENV_LAUNCHER__'] # else: # result = sys.executable # return result result = os.path.normcase(sys.executable) if not isinstance(result, text_type): result = fsdecode(result) return result def proceed(prompt, allowed_chars, error_prompt=None, default=None): p = prompt while True: s = raw_input(p) p = prompt if not s and default: s = default if s: c = s[0].lower() if c in allowed_chars: break if error_prompt: p = '%c: %s\n%s' % (c, error_prompt, prompt) return c def extract_by_key(d, keys): if isinstance(keys, string_types): keys = keys.split() result = {} for key in keys: if key in d: result[key] = d[key] return result def read_exports(stream): if sys.version_info[0] >= 3: # needs to be a text stream stream = codecs.getreader('utf-8')(stream) # Try to load as JSON, falling back on legacy format data = stream.read() stream = StringIO(data) try: jdata = json.load(stream) result = jdata['extensions']['python.exports']['exports'] for group, entries in result.items(): for k, v in entries.items(): s = '%s = %s' % (k, v) entry = get_export_entry(s) assert entry is not None entries[k] = entry return result except Exception: stream.seek(0, 
0) def read_stream(cp, stream): if hasattr(cp, 'read_file'): cp.read_file(stream) else: cp.readfp(stream) cp = configparser.ConfigParser() try: read_stream(cp, stream) except configparser.MissingSectionHeaderError: stream.close() data = textwrap.dedent(data) stream = StringIO(data) read_stream(cp, stream) result = {} for key in cp.sections(): result[key] = entries = {} for name, value in cp.items(key): s = '%s = %s' % (name, value) entry = get_export_entry(s) assert entry is not None #entry.dist = self entries[name] = entry return result def write_exports(exports, stream): if sys.version_info[0] >= 3: # needs to be a text stream stream = codecs.getwriter('utf-8')(stream) cp = configparser.ConfigParser() for k, v in exports.items(): # TODO check k, v for valid values cp.add_section(k) for entry in v.values(): if entry.suffix is None: s = entry.prefix else: s = '%s:%s' % (entry.prefix, entry.suffix) if entry.flags: s = '%s [%s]' % (s, ', '.join(entry.flags)) cp.set(k, entry.name, s) cp.write(stream) @contextlib.contextmanager def tempdir(): td = tempfile.mkdtemp() try: yield td finally: shutil.rmtree(td) @contextlib.contextmanager def chdir(d): cwd = os.getcwd() try: os.chdir(d) yield finally: os.chdir(cwd) @contextlib.contextmanager def socket_timeout(seconds=15): cto = socket.getdefaulttimeout() try: socket.setdefaulttimeout(seconds) yield finally: socket.setdefaulttimeout(cto) class cached_property(object): def __init__(self, func): self.func = func #for attr in ('__name__', '__module__', '__doc__'): # setattr(self, attr, getattr(func, attr, None)) def __get__(self, obj, cls=None): if obj is None: return self value = self.func(obj) object.__setattr__(obj, self.func.__name__, value) #obj.__dict__[self.func.__name__] = value = self.func(obj) return value def convert_path(pathname): """Return 'pathname' as a name that will work on the native filesystem. The path is split on '/' and put back together again using the current directory separator. 
Needed because filenames in the setup script are always supplied in Unix style, and have to be converted to the local convention before we can actually use them in the filesystem. Raises ValueError on non-Unix-ish systems if 'pathname' either starts or ends with a slash. """ if os.sep == '/': return pathname if not pathname: return pathname if pathname[0] == '/': raise ValueError("path '%s' cannot be absolute" % pathname) if pathname[-1] == '/': raise ValueError("path '%s' cannot end with '/'" % pathname) paths = pathname.split('/') while os.curdir in paths: paths.remove(os.curdir) if not paths: return os.curdir return os.path.join(*paths) class FileOperator(object): def __init__(self, dry_run=False): self.dry_run = dry_run self.ensured = set() self._init_record() def _init_record(self): self.record = False self.files_written = set() self.dirs_created = set() def record_as_written(self, path): if self.record: self.files_written.add(path) def newer(self, source, target): """Tell if the target is newer than the source. Returns true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Returns false if both exist and 'target' is the same age or younger than 'source'. Raise PackagingFileError if 'source' does not exist. Note that this test is not very accurate: files created in the same second will have the same "age". """ if not os.path.exists(source): raise DistlibException("file '%r' does not exist" % os.path.abspath(source)) if not os.path.exists(target): return True return os.stat(source).st_mtime > os.stat(target).st_mtime def copy_file(self, infile, outfile, check=True): """Copy a file respecting dry-run and force flags. 
""" self.ensure_dir(os.path.dirname(outfile)) logger.info('Copying %s to %s', infile, outfile) if not self.dry_run: msg = None if check: if os.path.islink(outfile): msg = '%s is a symlink' % outfile elif os.path.exists(outfile) and not os.path.isfile(outfile): msg = '%s is a non-regular file' % outfile if msg: raise ValueError(msg + ' which would be overwritten') shutil.copyfile(infile, outfile) self.record_as_written(outfile) def copy_stream(self, instream, outfile, encoding=None): assert not os.path.isdir(outfile) self.ensure_dir(os.path.dirname(outfile)) logger.info('Copying stream %s to %s', instream, outfile) if not self.dry_run: if encoding is None: outstream = open(outfile, 'wb') else: outstream = codecs.open(outfile, 'w', encoding=encoding) try: shutil.copyfileobj(instream, outstream) finally: outstream.close() self.record_as_written(outfile) def write_binary_file(self, path, data): self.ensure_dir(os.path.dirname(path)) if not self.dry_run: with open(path, 'wb') as f: f.write(data) self.record_as_written(path) def write_text_file(self, path, data, encoding): self.ensure_dir(os.path.dirname(path)) if not self.dry_run: with open(path, 'wb') as f: f.write(data.encode(encoding)) self.record_as_written(path) def set_mode(self, bits, mask, files): if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'): # Set the executable bits (owner, group, and world) on # all the files specified. 
for f in files: if self.dry_run: logger.info("changing mode of %s", f) else: mode = (os.stat(f).st_mode | bits) & mask logger.info("changing mode of %s to %o", f, mode) os.chmod(f, mode) set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f) def ensure_dir(self, path): path = os.path.abspath(path) if path not in self.ensured and not os.path.exists(path): self.ensured.add(path) d, f = os.path.split(path) self.ensure_dir(d) logger.info('Creating %s' % path) if not self.dry_run: os.mkdir(path) if self.record: self.dirs_created.add(path) def byte_compile(self, path, optimize=False, force=False, prefix=None): dpath = cache_from_source(path, not optimize) logger.info('Byte-compiling %s to %s', path, dpath) if not self.dry_run: if force or self.newer(path, dpath): if not prefix: diagpath = None else: assert path.startswith(prefix) diagpath = path[len(prefix):] py_compile.compile(path, dpath, diagpath, True) # raise error self.record_as_written(dpath) return dpath def ensure_removed(self, path): if os.path.exists(path): if os.path.isdir(path) and not os.path.islink(path): logger.debug('Removing directory tree at %s', path) if not self.dry_run: shutil.rmtree(path) if self.record: if path in self.dirs_created: self.dirs_created.remove(path) else: if os.path.islink(path): s = 'link' else: s = 'file' logger.debug('Removing %s %s', s, path) if not self.dry_run: os.remove(path) if self.record: if path in self.files_written: self.files_written.remove(path) def is_writable(self, path): result = False while not result: if os.path.exists(path): result = os.access(path, os.W_OK) break parent = os.path.dirname(path) if parent == path: break path = parent return result def commit(self): """ Commit recorded changes, turn off recording, return changes. 
""" assert self.record result = self.files_written, self.dirs_created self._init_record() return result def rollback(self): if not self.dry_run: for f in list(self.files_written): if os.path.exists(f): os.remove(f) # dirs should all be empty now, except perhaps for # __pycache__ subdirs # reverse so that subdirs appear before their parents dirs = sorted(self.dirs_created, reverse=True) for d in dirs: flist = os.listdir(d) if flist: assert flist == ['__pycache__'] sd = os.path.join(d, flist[0]) os.rmdir(sd) os.rmdir(d) # should fail if non-empty self._init_record() def resolve(module_name, dotted_path): if module_name in sys.modules: mod = sys.modules[module_name] else: mod = __import__(module_name) if dotted_path is None: result = mod else: parts = dotted_path.split('.') result = getattr(mod, parts.pop(0)) for p in parts: result = getattr(result, p) return result class ExportEntry(object): def __init__(self, name, prefix, suffix, flags): self.name = name self.prefix = prefix self.suffix = suffix self.flags = flags @cached_property def value(self): return resolve(self.prefix, self.suffix) def __repr__(self): # pragma: no cover return '' % (self.name, self.prefix, self.suffix, self.flags) def __eq__(self, other): if not isinstance(other, ExportEntry): result = False else: result = (self.name == other.name and self.prefix == other.prefix and self.suffix == other.suffix and self.flags == other.flags) return result __hash__ = object.__hash__ ENTRY_RE = re.compile(r'''(?P(\w|[-.+])+) \s*=\s*(?P(\w+)([:\.]\w+)*) \s*(\[\s*(?P\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? 
''', re.VERBOSE) def get_export_entry(specification): m = ENTRY_RE.search(specification) if not m: result = None if '[' in specification or ']' in specification: raise DistlibException("Invalid specification " "'%s'" % specification) else: d = m.groupdict() name = d['name'] path = d['callable'] colons = path.count(':') if colons == 0: prefix, suffix = path, None else: if colons != 1: raise DistlibException("Invalid specification " "'%s'" % specification) prefix, suffix = path.split(':') flags = d['flags'] if flags is None: if '[' in specification or ']' in specification: raise DistlibException("Invalid specification " "'%s'" % specification) flags = [] else: flags = [f.strip() for f in flags.split(',')] result = ExportEntry(name, prefix, suffix, flags) return result def get_cache_base(suffix=None): """ Return the default base location for distlib caches. If the directory does not exist, it is created. Use the suffix provided for the base directory, and default to '.distlib' if it isn't provided. On Windows, if LOCALAPPDATA is defined in the environment, then it is assumed to be a directory, and will be the parent directory of the result. On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home directory - using os.expanduser('~') - will be the parent directory of the result. The result is just the directory '.distlib' in the parent directory as determined above, or with the name specified with ``suffix``. 
""" if suffix is None: suffix = '.distlib' if os.name == 'nt' and 'LOCALAPPDATA' in os.environ: result = os.path.expandvars('$localappdata') else: # Assume posix, or old Windows result = os.path.expanduser('~') # we use 'isdir' instead of 'exists', because we want to # fail if there's a file with that name if os.path.isdir(result): usable = os.access(result, os.W_OK) if not usable: logger.warning('Directory exists but is not writable: %s', result) else: try: os.makedirs(result) usable = True except OSError: logger.warning('Unable to create %s', result, exc_info=True) usable = False if not usable: result = tempfile.mkdtemp() logger.warning('Default location unusable, using %s', result) return os.path.join(result, suffix) def path_to_cache_dir(path): """ Convert an absolute path to a directory name for use in a cache. The algorithm used is: #. On Windows, any ``':'`` in the drive is replaced with ``'---'``. #. Any occurrence of ``os.sep`` is replaced with ``'--'``. #. ``'.cache'`` is appended. 
""" d, p = os.path.splitdrive(os.path.abspath(path)) if d: d = d.replace(':', '---') p = p.replace(os.sep, '--') return d + p + '.cache' def ensure_slash(s): if not s.endswith('/'): return s + '/' return s def parse_credentials(netloc): username = password = None if '@' in netloc: prefix, netloc = netloc.split('@', 1) if ':' not in prefix: username = prefix else: username, password = prefix.split(':', 1) return username, password, netloc def get_process_umask(): result = os.umask(0o22) os.umask(result) return result def is_string_sequence(seq): result = True i = None for i, s in enumerate(seq): if not isinstance(s, string_types): result = False break assert i is not None return result PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-' '([a-z0-9_.+-]+)', re.I) PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)') def split_filename(filename, project_name=None): """ Extract name, version, python version from a filename (no extension) Return name, version, pyver or None """ result = None pyver = None filename = unquote(filename).replace(' ', '-') m = PYTHON_VERSION.search(filename) if m: pyver = m.group(1) filename = filename[:m.start()] if project_name and len(filename) > len(project_name) + 1: m = re.match(re.escape(project_name) + r'\b', filename) if m: n = m.end() result = filename[:n], filename[n + 1:], pyver if result is None: m = PROJECT_NAME_AND_VERSION.match(filename) if m: result = m.group(1), m.group(3), pyver return result # Allow spaces in name because of legacy dists like "Twisted Core" NAME_VERSION_RE = re.compile(r'(?P[\w .-]+)\s*' r'\(\s*(?P[^\s)]+)\)$') def parse_name_and_version(p): """ A utility method used to get name and version from a string. From e.g. a Provides-Dist value. :param p: A value in a form 'foo (1.0)' :return: The name and version as a tuple. 
""" m = NAME_VERSION_RE.match(p) if not m: raise DistlibException('Ill-formed name/version string: \'%s\'' % p) d = m.groupdict() return d['name'].strip().lower(), d['ver'] def get_extras(requested, available): result = set() requested = set(requested or []) available = set(available or []) if '*' in requested: requested.remove('*') result |= available for r in requested: if r == '-': result.add(r) elif r.startswith('-'): unwanted = r[1:] if unwanted not in available: logger.warning('undeclared extra: %s' % unwanted) if unwanted in result: result.remove(unwanted) else: if r not in available: logger.warning('undeclared extra: %s' % r) result.add(r) return result # # Extended metadata functionality # def _get_external_data(url): result = {} try: # urlopen might fail if it runs into redirections, # because of Python issue #13696. Fixed in locators # using a custom redirect handler. resp = urlopen(url) headers = resp.info() ct = headers.get('Content-Type') if not ct.startswith('application/json'): logger.debug('Unexpected response for JSON request: %s', ct) else: reader = codecs.getreader('utf-8')(resp) #data = reader.read().decode('utf-8') #result = json.loads(data) result = json.load(reader) except Exception as e: logger.exception('Failed to get external data for %s: %s', url, e) return result _external_data_base_url = 'https://www.red-dove.com/pypi/projects/' def get_project_data(name): url = '%s/%s/project.json' % (name[0].upper(), name) url = urljoin(_external_data_base_url, url) result = _get_external_data(url) return result def get_package_data(name, version): url = '%s/%s/package-%s.json' % (name[0].upper(), name, version) url = urljoin(_external_data_base_url, url) return _get_external_data(url) class Cache(object): """ A class implementing a cache for resources that need to live in the file system e.g. shared libraries. This class was moved from resources to here because it could be used by other modules, e.g. the wheel module. 
""" def __init__(self, base): """ Initialise an instance. :param base: The base directory where the cache should be located. """ # we use 'isdir' instead of 'exists', because we want to # fail if there's a file with that name if not os.path.isdir(base): # pragma: no cover os.makedirs(base) if (os.stat(base).st_mode & 0o77) != 0: logger.warning('Directory \'%s\' is not private', base) self.base = os.path.abspath(os.path.normpath(base)) def prefix_to_dir(self, prefix): """ Converts a resource prefix to a directory name in the cache. """ return path_to_cache_dir(prefix) def clear(self): """ Clear the cache. """ not_removed = [] for fn in os.listdir(self.base): fn = os.path.join(self.base, fn) try: if os.path.islink(fn) or os.path.isfile(fn): os.remove(fn) elif os.path.isdir(fn): shutil.rmtree(fn) except Exception: not_removed.append(fn) return not_removed class EventMixin(object): """ A very simple publish/subscribe system. """ def __init__(self): self._subscribers = {} def add(self, event, subscriber, append=True): """ Add a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be added (and called when the event is published). :param append: Whether to append or prepend the subscriber to an existing subscriber list for the event. """ subs = self._subscribers if event not in subs: subs[event] = deque([subscriber]) else: sq = subs[event] if append: sq.append(subscriber) else: sq.appendleft(subscriber) def remove(self, event, subscriber): """ Remove a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be removed. """ subs = self._subscribers if event not in subs: raise ValueError('No subscribers: %r' % event) subs[event].remove(subscriber) def get_subscribers(self, event): """ Return an iterator for the subscribers for an event. :param event: The event to return subscribers for. 
""" return iter(self._subscribers.get(event, ())) def publish(self, event, *args, **kwargs): """ Publish a event and return a list of values returned by its subscribers. :param event: The event to publish. :param args: The positional arguments to pass to the event's subscribers. :param kwargs: The keyword arguments to pass to the event's subscribers. """ result = [] for subscriber in self.get_subscribers(event): try: value = subscriber(event, *args, **kwargs) except Exception: logger.exception('Exception during event publication') value = None result.append(value) logger.debug('publish %s: args = %s, kwargs = %s, result = %s', event, args, kwargs, result) return result # # Simple sequencing # class Sequencer(object): def __init__(self): self._preds = {} self._succs = {} self._nodes = set() # nodes with no preds/succs def add_node(self, node): self._nodes.add(node) def remove_node(self, node, edges=False): if node in self._nodes: self._nodes.remove(node) if edges: for p in set(self._preds.get(node, ())): self.remove(p, node) for s in set(self._succs.get(node, ())): self.remove(node, s) # Remove empties for k, v in list(self._preds.items()): if not v: del self._preds[k] for k, v in list(self._succs.items()): if not v: del self._succs[k] def add(self, pred, succ): assert pred != succ self._preds.setdefault(succ, set()).add(pred) self._succs.setdefault(pred, set()).add(succ) def remove(self, pred, succ): assert pred != succ try: preds = self._preds[succ] succs = self._succs[pred] except KeyError: # pragma: no cover raise ValueError('%r not a successor of anything' % succ) try: preds.remove(pred) succs.remove(succ) except KeyError: # pragma: no cover raise ValueError('%r not a successor of %r' % (succ, pred)) def is_step(self, step): return (step in self._preds or step in self._succs or step in self._nodes) def get_steps(self, final): if not self.is_step(final): raise ValueError('Unknown: %r' % final) result = [] todo = [] seen = set() todo.append(final) while todo: 
step = todo.pop(0) if step in seen: # if a step was already seen, # move it to the end (so it will appear earlier # when reversed on return) ... but not for the # final step, as that would be confusing for # users if step != final: result.remove(step) result.append(step) else: seen.add(step) result.append(step) preds = self._preds.get(step, ()) todo.extend(preds) return reversed(result) @property def strong_connections(self): #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm index_counter = [0] stack = [] lowlinks = {} index = {} result = [] graph = self._succs def strongconnect(node): # set the depth index for this node to the smallest unused index index[node] = index_counter[0] lowlinks[node] = index_counter[0] index_counter[0] += 1 stack.append(node) # Consider successors try: successors = graph[node] except Exception: successors = [] for successor in successors: if successor not in lowlinks: # Successor has not yet been visited strongconnect(successor) lowlinks[node] = min(lowlinks[node],lowlinks[successor]) elif successor in stack: # the successor is in the stack and hence in the current # strongly connected component (SCC) lowlinks[node] = min(lowlinks[node],index[successor]) # If `node` is a root node, pop the stack and generate an SCC if lowlinks[node] == index[node]: connected_component = [] while True: successor = stack.pop() connected_component.append(successor) if successor == node: break component = tuple(connected_component) # storing the result result.append(component) for node in graph: if node not in lowlinks: strongconnect(node) return result @property def dot(self): result = ['digraph G {'] for succ in self._preds: preds = self._preds[succ] for pred in preds: result.append(' %s -> %s;' % (pred, succ)) for node in self._nodes: result.append(' %s;' % node) result.append('}') return '\n'.join(result) # # Unarchiving functionality for zip, tar, tgz, tbz, whl # ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', 
'.tgz', '.tbz', '.whl') def unarchive(archive_filename, dest_dir, format=None, check=True): def check_path(path): if not isinstance(path, text_type): path = path.decode('utf-8') p = os.path.abspath(os.path.join(dest_dir, path)) if not p.startswith(dest_dir) or p[plen] != os.sep: raise ValueError('path outside destination: %r' % p) dest_dir = os.path.abspath(dest_dir) plen = len(dest_dir) archive = None if format is None: if archive_filename.endswith(('.zip', '.whl')): format = 'zip' elif archive_filename.endswith(('.tar.gz', '.tgz')): format = 'tgz' mode = 'r:gz' elif archive_filename.endswith(('.tar.bz2', '.tbz')): format = 'tbz' mode = 'r:bz2' elif archive_filename.endswith('.tar'): format = 'tar' mode = 'r' else: # pragma: no cover raise ValueError('Unknown format for %r' % archive_filename) try: if format == 'zip': archive = ZipFile(archive_filename, 'r') if check: names = archive.namelist() for name in names: check_path(name) else: archive = tarfile.open(archive_filename, mode) if check: names = archive.getnames() for name in names: check_path(name) if format != 'zip' and sys.version_info[0] < 3: # See Python issue 17153. If the dest path contains Unicode, # tarfile extraction fails on Python 2.x if a member path name # contains non-ASCII characters - it leads to an implicit # bytes -> unicode conversion using ASCII to decode. 
for tarinfo in archive.getmembers(): if not isinstance(tarinfo.name, text_type): tarinfo.name = tarinfo.name.decode('utf-8') archive.extractall(dest_dir) finally: if archive: archive.close() def zip_dir(directory): """zip a directory tree into a BytesIO object""" result = io.BytesIO() dlen = len(directory) with ZipFile(result, "w") as zf: for root, dirs, files in os.walk(directory): for name in files: full = os.path.join(root, name) rel = root[dlen:] dest = os.path.join(rel, name) zf.write(full, dest) return result # # Simple progress bar # UNITS = ('', 'K', 'M', 'G','T','P') class Progress(object): unknown = 'UNKNOWN' def __init__(self, minval=0, maxval=100): assert maxval is None or maxval >= minval self.min = self.cur = minval self.max = maxval self.started = None self.elapsed = 0 self.done = False def update(self, curval): assert self.min <= curval assert self.max is None or curval <= self.max self.cur = curval now = time.time() if self.started is None: self.started = now else: self.elapsed = now - self.started def increment(self, incr): assert incr >= 0 self.update(self.cur + incr) def start(self): self.update(self.min) return self def stop(self): if self.max is not None: self.update(self.max) self.done = True @property def maximum(self): return self.unknown if self.max is None else self.max @property def percentage(self): if self.done: result = '100 %' elif self.max is None: result = ' ?? %' else: v = 100.0 * (self.cur - self.min) / (self.max - self.min) result = '%3d %%' % v return result def format_duration(self, duration): if (duration <= 0) and self.max is None or self.cur == self.min: result = '??:??:??' 
#elif duration < 1: # result = '--:--:--' else: result = time.strftime('%H:%M:%S', time.gmtime(duration)) return result @property def ETA(self): if self.done: prefix = 'Done' t = self.elapsed #import pdb; pdb.set_trace() else: prefix = 'ETA ' if self.max is None: t = -1 elif self.elapsed == 0 or (self.cur == self.min): t = 0 else: #import pdb; pdb.set_trace() t = float(self.max - self.min) t /= self.cur - self.min t = (t - 1) * self.elapsed return '%s: %s' % (prefix, self.format_duration(t)) @property def speed(self): if self.elapsed == 0: result = 0.0 else: result = (self.cur - self.min) / self.elapsed for unit in UNITS: if result < 1000: break result /= 1000.0 return '%d %sB/s' % (result, unit) # # Glob functionality # RICH_GLOB = re.compile(r'\{([^}]*)\}') _CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]') _CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$') def iglob(path_glob): """Extended globbing function that supports ** and {opt1,opt2,opt3}.""" if _CHECK_RECURSIVE_GLOB.search(path_glob): msg = """invalid glob %r: recursive glob "**" must be used alone""" raise ValueError(msg % path_glob) if _CHECK_MISMATCH_SET.search(path_glob): msg = """invalid glob %r: mismatching set marker '{' or '}'""" raise ValueError(msg % path_glob) return _iglob(path_glob) def _iglob(path_glob): rich_path_glob = RICH_GLOB.split(path_glob, 1) if len(rich_path_glob) > 1: assert len(rich_path_glob) == 3, rich_path_glob prefix, set, suffix = rich_path_glob for item in set.split(','): for path in _iglob(''.join((prefix, item, suffix))): yield path else: if '**' not in path_glob: for item in std_iglob(path_glob): yield item else: prefix, radical = path_glob.split('**', 1) if prefix == '': prefix = '.' 
if radical == '': radical = '*' else: # we support both radical = radical.lstrip('/') radical = radical.lstrip('\\') for path, dir, files in os.walk(prefix): path = os.path.normpath(path) for fn in _iglob(os.path.join(path, radical)): yield fn if ssl: from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname, CertificateError) # # HTTPSConnection which verifies certificates/matches domains # class HTTPSConnection(httplib.HTTPSConnection): ca_certs = None # set this to the path to the certs file (.pem) check_domain = True # only used if ca_certs is not None # noinspection PyPropertyAccess def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if getattr(self, '_tunnel_host', False): self.sock = sock self._tunnel() if not hasattr(ssl, 'SSLContext'): # For 2.x if self.ca_certs: cert_reqs = ssl.CERT_REQUIRED else: cert_reqs = ssl.CERT_NONE self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, cert_reqs=cert_reqs, ssl_version=ssl.PROTOCOL_SSLv23, ca_certs=self.ca_certs) else: # pragma: no cover context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.options |= ssl.OP_NO_SSLv2 if self.cert_file: context.load_cert_chain(self.cert_file, self.key_file) kwargs = {} if self.ca_certs: context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(cafile=self.ca_certs) if getattr(ssl, 'HAS_SNI', False): kwargs['server_hostname'] = self.host self.sock = context.wrap_socket(sock, **kwargs) if self.ca_certs and self.check_domain: try: match_hostname(self.sock.getpeercert(), self.host) logger.debug('Host verified: %s', self.host) except CertificateError: # pragma: no cover self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() raise class HTTPSHandler(BaseHTTPSHandler): def __init__(self, ca_certs, check_domain=True): BaseHTTPSHandler.__init__(self) self.ca_certs = ca_certs self.check_domain = check_domain def _conn_maker(self, *args, **kwargs): """ This is called to create a connection instance. 
Normally you'd pass a connection class to do_open, but it doesn't actually check for a class, and just expects a callable. As long as we behave just as a constructor would have, we should be OK. If it ever changes so that we *must* pass a class, we'll create an UnsafeHTTPSConnection class which just sets check_domain to False in the class definition, and choose which one to pass to do_open. """ result = HTTPSConnection(*args, **kwargs) if self.ca_certs: result.ca_certs = self.ca_certs result.check_domain = self.check_domain return result def https_open(self, req): try: return self.do_open(self._conn_maker, req) except URLError as e: if 'certificate verify failed' in str(e.reason): raise CertificateError('Unable to verify server certificate ' 'for %s' % req.host) else: raise # # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The- # Middle proxy using HTTP listens on port 443, or an index mistakenly serves # HTML containing a http://xyz link when it should be https://xyz), # you can use the following handler class, which does not allow HTTP traffic. # # It works by inheriting from HTTPHandler - so build_opener won't add a # handler for HTTP itself. 
# class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler): def http_open(self, req): raise URLError('Unexpected HTTP request on what should be a secure ' 'connection: %s' % req) # # XML-RPC with timeouts # _ver_info = sys.version_info[:2] if _ver_info == (2, 6): class HTTP(httplib.HTTP): def __init__(self, host='', port=None, **kwargs): if port == 0: # 0 means use port 0, not the default port port = None self._setup(self._connection_class(host, port, **kwargs)) if ssl: class HTTPS(httplib.HTTPS): def __init__(self, host='', port=None, **kwargs): if port == 0: # 0 means use port 0, not the default port port = None self._setup(self._connection_class(host, port, **kwargs)) class Transport(xmlrpclib.Transport): def __init__(self, timeout, use_datetime=0): self.timeout = timeout xmlrpclib.Transport.__init__(self, use_datetime) def make_connection(self, host): h, eh, x509 = self.get_host_info(host) if _ver_info == (2, 6): result = HTTP(h, timeout=self.timeout) else: if not self._connection or host != self._connection[0]: self._extra_headers = eh self._connection = host, httplib.HTTPConnection(h) result = self._connection[1] return result if ssl: class SafeTransport(xmlrpclib.SafeTransport): def __init__(self, timeout, use_datetime=0): self.timeout = timeout xmlrpclib.SafeTransport.__init__(self, use_datetime) def make_connection(self, host): h, eh, kwargs = self.get_host_info(host) if not kwargs: kwargs = {} kwargs['timeout'] = self.timeout if _ver_info == (2, 6): result = HTTPS(host, None, **kwargs) else: if not self._connection or host != self._connection[0]: self._extra_headers = eh self._connection = host, httplib.HTTPSConnection(h, None, **kwargs) result = self._connection[1] return result class ServerProxy(xmlrpclib.ServerProxy): def __init__(self, uri, **kwargs): self.timeout = timeout = kwargs.pop('timeout', None) # The above classes only come into play if a timeout # is specified if timeout is not None: scheme, _ = splittype(uri) use_datetime = 
kwargs.get('use_datetime', 0) if scheme == 'https': tcls = SafeTransport else: tcls = Transport kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime) self.transport = t xmlrpclib.ServerProxy.__init__(self, uri, **kwargs) # # CSV functionality. This is provided because on 2.x, the csv module can't # handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. # def _csv_open(fn, mode, **kwargs): if sys.version_info[0] < 3: mode += 'b' else: kwargs['newline'] = '' return open(fn, mode, **kwargs) class CSVBase(object): defaults = { 'delimiter': str(','), # The strs are used because we need native 'quotechar': str('"'), # str in the csv API (2.x won't take 'lineterminator': str('\n') # Unicode) } def __enter__(self): return self def __exit__(self, *exc_info): self.stream.close() class CSVReader(CSVBase): def __init__(self, **kwargs): if 'stream' in kwargs: stream = kwargs['stream'] if sys.version_info[0] >= 3: # needs to be a text stream stream = codecs.getreader('utf-8')(stream) self.stream = stream else: self.stream = _csv_open(kwargs['path'], 'r') self.reader = csv.reader(self.stream, **self.defaults) def __iter__(self): return self def next(self): result = next(self.reader) if sys.version_info[0] < 3: for i, item in enumerate(result): if not isinstance(item, text_type): result[i] = item.decode('utf-8') return result __next__ = next class CSVWriter(CSVBase): def __init__(self, fn, **kwargs): self.stream = _csv_open(fn, 'w') self.writer = csv.writer(self.stream, **self.defaults) def writerow(self, row): if sys.version_info[0] < 3: r = [] for item in row: if isinstance(item, text_type): item = item.encode('utf-8') r.append(item) row = r self.writer.writerow(row) # # Configurator functionality # class Configurator(BaseConfigurator): value_converters = dict(BaseConfigurator.value_converters) value_converters['inc'] = 'inc_convert' def __init__(self, config, base=None): super(Configurator, self).__init__(config) self.base = base or os.getcwd() 
def configure_custom(self, config): def convert(o): if isinstance(o, (list, tuple)): result = type(o)([convert(i) for i in o]) elif isinstance(o, dict): if '()' in o: result = self.configure_custom(o) else: result = {} for k in o: result[k] = convert(o[k]) else: result = self.convert(o) return result c = config.pop('()') if not callable(c): c = self.resolve(c) props = config.pop('.', None) # Check for valid identifiers args = config.pop('[]', ()) if args: args = tuple([convert(o) for o in args]) items = [(k, convert(config[k])) for k in config if valid_ident(k)] kwargs = dict(items) result = c(*args, **kwargs) if props: for n, v in props.items(): setattr(result, n, convert(v)) return result def __getitem__(self, key): result = self.config[key] if isinstance(result, dict) and '()' in result: self.config[key] = result = self.configure_custom(result) return result def inc_convert(self, value): """Default converter for the inc:// protocol.""" if not os.path.isabs(value): value = os.path.join(self.base, value) with codecs.open(value, 'r', encoding='utf-8') as f: result = json.load(f) return result # # Mixin for running subprocesses and capturing their output # class SubprocessMixin(object): def __init__(self, verbose=False, progress=None): self.verbose = verbose self.progress = progress def reader(self, stream, context): """ Read lines from a subprocess' output stream and either pass to a progress callable (if specified) or write progress information to sys.stderr. 
""" progress = self.progress verbose = self.verbose while True: s = stream.readline() if not s: break if progress is not None: progress(s, context) else: if not verbose: sys.stderr.write('.') else: sys.stderr.write(s.decode('utf-8')) sys.stderr.flush() stream.close() def run_command(self, cmd, **kwargs): p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout')) t1.start() t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr')) t2.start() p.wait() t1.join() t2.join() if self.progress is not None: self.progress('done.', 'main') elif self.verbose: sys.stderr.write('done.\n') return p def normalize_name(name): """Normalize a python package name a la PEP 503""" # https://www.python.org/dev/peps/pep-0503/#normalized-names return re.sub('[-_.]+', '-', name).lower() PK.e["\N\Ndistlib/index.pycnu[ abc@sddlZddlZddlZddlZddlZddlZyddlmZWn!ek rddl mZnXddl m Z ddl m Z mZmZmZmZmZddlmZmZmZejeZdZdZd efd YZdS( iN(tThreadi(tDistlibException(tHTTPBasicAuthHandlertRequesttHTTPPasswordMgrturlparset build_openert string_types(tcached_propertytzip_dirt ServerProxyshttps://pypi.python.org/pypitpypit PackageIndexcBseZdZdZddZdZdZdZdZ dZ dZ dd Z dd Z dd Zddd d ddZdZddZddZdddZdZdZddZRS(sc This class represents a package index compatible with PyPI, the Python Package Index. s.----------ThIs_Is_tHe_distlib_index_bouNdaRY_$c Cs|p t|_|jt|j\}}}}}}|sX|sX|sX|d krntd|jnd |_d |_d |_d |_ d |_ t t j dj}x`d D]X} y>tj| dgd|d |} | d kr| |_PnWqtk rqXqWWd QXd S(s Initialise an instance. :param url: The URL of the index. If not specified, the URL for PyPI is used. 
thttpthttpssinvalid repository: %stwtgpgtgpg2s --versiontstdouttstderriN(R R(RR(t DEFAULT_INDEXturltread_configurationRRtNonetpassword_handlert ssl_verifierRtgpg_homet rpc_proxytopentostdevnullt subprocesst check_calltOSError( tselfRtschemetnetloctpathtparamstquerytfragtsinktstrc((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyt__init__$s( !          cCs3ddlm}ddlm}|}||S(ss Get the distutils command for interacting with PyPI configurations. :return: the command. i(t Distribution(t PyPIRCCommand(tdistutils.coreR-tdistutils.configR.(R"R-R.td((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyt_get_pypirc_commandBs cCsy|j}|j|_|j}|jd|_|jd|_|jdd|_|jd|j|_dS(s Read the PyPI access configuration as supported by distutils, getting PyPI to do the actual work. This populates ``username``, ``password``, ``realm`` and ``url`` attributes from the configuration. tusernametpasswordtrealmR t repositoryN(R2RR6t _read_pypirctgetR3R4R5(R"tctcfg((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyRLs   cCs0|j|j}|j|j|jdS(s Save the PyPI access configuration. You must have set ``username`` and ``password`` attributes before calling this method. Again, distutils is used to do the actual work. N(tcheck_credentialsR2t _store_pypircR3R4(R"R9((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pytsave_configuration[s  cCs|jdks|jdkr-tdnt}t|j\}}}}}}|j|j||j|jt ||_ dS(sp Check that ``username`` and ``password`` have been set, and raise an exception if not. s!username and password must be setN( R3RR4RRRRt add_passwordR5RR(R"tpmt_R$((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyR;gs  !cCs|j|j|j}d|d<|j|jg}|j|}d|d<|j|jg}|j|S(sq Register a distribution on PyPI, using the provided metadata. :param metadata: A :class:`Metadata` instance defining at least a name and version number for the distribution to be registered. :return: The HTTP response received from PyPI upon submission of the request. 
tverifys:actiontsubmit(R;tvalidatettodicttencode_requesttitemst send_request(R"tmetadataR1trequesttresponse((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pytregisterss     cCsjxYtr[|j}|sPn|jdj}|j|tjd||fqW|jdS(sr Thread runner for reading lines of from a subprocess into a buffer. :param name: The logical name of the stream (used for logging only). :param stream: The stream to read from. This will typically a pipe connected to the output stream of a subprocess. :param outbuf: The list to append the read lines to. sutf-8s%s: %sN(tTruetreadlinetdecodetrstriptappendtloggertdebugtclose(R"tnametstreamtoutbufR*((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyt_readers   cCs|jdddg}|dkr-|j}n|rI|jd|gn|dk rn|jdddgntj}tjj|tjj |d}|jd d d |d ||gt j d dj|||fS(s Return a suitable command for signing a file. :param filename: The pathname to the file to be signed. :param signer: The identifier of the signer of the file. :param sign_password: The passphrase for the signer's private key used for signing. :param keystore: The path to a directory which contains the keys used in verification. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: The signing command as a list suitable to be passed to :class:`subprocess.Popen`. s --status-fdt2s--no-ttys --homedirs--batchs--passphrase-fdt0s.ascs --detach-signs--armors --local-users--outputs invoking: %st N( RRRtextendttempfiletmkdtempRR%tjointbasenameRQRR(R"tfilenametsignert sign_passwordtkeystoretcmdttdtsf((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pytget_sign_commands    %c Cs itjd6tjd6}|dk r6tj|d        c Cs|jtjj|s/td|ntjj|d}tjj|sitd|n|j|j|j }}t |j }d d|fd|fg}d||fg}|j ||} |j | S( s2 Upload documentation to the index. :param metadata: A :class:`Metadata` instance defining at least a name and version number for the documentation to be uploaded. :param doc_dir: The pathname of the directory which contains the documentation. 
This should be the directory that contains the ``index.html`` for the documentation. :return: The HTTP response received from PyPI upon submission of the request. snot a directory: %rs index.htmls not found: %rs:actiont doc_uploadRTtversionR(s:actionR(R;RR%tisdirRR^RRCRTRR tgetvalueRERG( R"RHtdoc_dirtfnRTRtzip_datatfieldsRRI((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pytupload_documentation)s  cCs||jdddg}|dkr-|j}n|rI|jd|gn|jd||gtjddj||S( s| Return a suitable command for verifying a file. :param signature_filename: The pathname to the file containing the signature. :param data_filename: The pathname to the file containing the signed data. :param keystore: The path to a directory which contains the keys used in verification. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: The verifying command as a list suitable to be passed to :class:`subprocess.Popen`. s --status-fdRXs--no-ttys --homedirs--verifys invoking: %sRZN(RRRR[RQRRR^(R"tsignature_filenamet data_filenameRcRd((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pytget_verify_commandEs  cCsn|jstdn|j|||}|j|\}}}|dkrdtd|n|dkS(s6 Verify a signature for a file. :param signature_filename: The pathname to the file containing the signature. :param data_filename: The pathname to the file containing the signed data. :param keystore: The path to a directory which contains the keys used in verification. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: True if the signature was verified, else False. 
s0verification unavailable because gpg unavailableiis(verify command failed with error code %s(ii(RRRRv(R"RRRcRdR+RR((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pytverify_signature]s     cCs |d kr"d }tjdnMt|ttfrF|\}}nd}tt|}tjd|t|d}|j t |}z|j } d} d} d} d} d| krt | d } n|r|| | | nxyt rp|j| }|sPn| t|7} |j||rJ|j|n| d 7} |r|| | | qqWWd |jXWd QX| dkr| | krtd | | fn|r|j}||krtd ||||fntjd|nd S(s This is a convenience method for downloading a file from an URL. Normally, this will be a file from the index, though currently no check is made for this (i.e. a file can be downloaded from anywhere). The method is just like the :func:`urlretrieve` function in the standard library, except that it allows digest computation to be done during download and checking that the downloaded data matched any expected value. :param url: The URL of the file to be downloaded (assumed to be available via an HTTP GET request). :param destfile: The pathname where the downloaded file is to be saved. :param digest: If specified, this must be a (hasher, value) tuple, where hasher is the algorithm used (e.g. ``'md5'``) and ``value`` is the expected value. :param reporthook: The same as for :func:`urlretrieve` in the standard library. sNo digest specifiedRsDigest specified: %stwbi iiscontent-lengthsContent-LengthiNs1retrieval incomplete: got only %d out of %d bytess.%s digest mismatch for %s: expected %s, got %ssDigest verified: %s(RRQRRt isinstancetlistttupletgetattrRRRGRtinfotintRLRtlenRnRRSRR(R"Rtdestfiletdigestt reporthooktdigesterthashertdfptsfptheaderst blocksizetsizeRtblocknumtblocktactual((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyt download_filevsV        cCsWg}|jr"|j|jn|jr>|j|jnt|}|j|S(s Send a standard library :class:`Request` to PyPI and return its response. :param req: The request to send. :return: The HTTP response from PyPI (a standard library HTTPResponse). 
(RRPRRR(R"treqthandlerstopener((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyRGs   cCs<g}|j}xy|D]q\}}t|ttfsC|g}nxA|D]9}|jd|d|jdd|jdfqJWqWxG|D]?\}} } |jd|d|| fjdd| fqW|jd|ddfdj|} d|} i| d6tt| d 6} t |j | | S( s& Encode fields and files for posting to an HTTP server. :param fields: The fields to send as a list of (fieldname, value) tuples. :param files: The files to send as a list of (fieldname, filename, file_bytes) tuple. s--s)Content-Disposition: form-data; name="%s"sutf-8ts8Content-Disposition: form-data; name="%s"; filename="%s"s smultipart/form-data; boundary=s Content-typesContent-length( tboundaryRRRR[RwR^tstrRRR(R"RRtpartsRtktvaluestvtkeyR`tvaluetbodytctR((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyREs4      cCsbt|tri|d6}n|jdkrIt|jdd|_n|jj||p^dS(NRTttimeoutg@tand(RRRRR Rtsearch(R"ttermstoperator((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyRs N(t__name__t __module__t__doc__RRR,R2RR=R;RKRWRgRvRyRRRRRRGRER(((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyR s*      # 8   M  +(RtloggingRRRR\t threadingRt ImportErrortdummy_threadingRRtcompatRRRRRRtutilRR R t getLoggerRRQRt DEFAULT_REALMtobjectR (((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyts       .PK.e[zoodistlib/markers.pyonu[ abc@sdZddlZddlZddlZddlZddlmZmZddlm Z dgZ de fdYZ dd ZdS( sEParser for the environment markers micro-language defined in PEP 345.iNi(tpython_implementationt string_types(tin_venvt interprett EvaluatorcBs^eZdZi dd6dd6dd6dd6d d 6d d 6d d6dd6dd6Zi ejd6dejd d6ejjdddd6e j d6e e d6ej d6ejd6ejd6ed 6Zd,d!Zd"Zd#Zd,d$Zd%Zd&Zd'Zd(Zd)Zd*Zd+ZRS(-s5 A limited evaluator for Python expressions. 
cCs ||kS(N((txty((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyttteqcCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRRtgtcCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRRtgtecCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRRtincCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRRtltcCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRRtltecCs| S(N((R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyR RtnotcCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyR!RtnoteqcCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyR"Rtnotint sys_platforms%s.%sitpython_versiont iitpython_full_versiontos_nametplatform_in_venvtplatform_releasetplatform_versiontplatform_machinetplatform_python_implementationcCs|p i|_d|_dS(su Initialise an instance. :param context: If specified, names are looked up in this mapping. N(tcontexttNonetsource(tselfR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt__init__3scCsHd}d|j|||!}||t|jkrD|d7}n|S(sH Get the part of the source which is causing a problem. i s%rs...(Rtlen(Rtoffsett fragment_lents((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt get_fragment<s  cCst|d|dS(s@ Get a handler for the specified AST node type. 
sdo_%sN(tgetattrR(Rt node_type((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt get_handlerFscCst|tr||_idd6}|r8||dR%R3R/(tlhsnodetrhsnodeR@R$(R4R(s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt sanity_checks $ sunsupported operation: %r( tleftR8RLtziptopst comparatorsR0R1R2t operatorsR/( RR4RPRNtlhsRBREROtrhs((R4Rs?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt do_compares  "  cCs|j|jS(N(R8tbody(RR4((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt do_expressionscCs|t}|j|jkr1t}|j|j}n+|j|jkr\t}|j|j}n|sxtd|jn|S(Nsinvalid expression: %s(R>R:RRLR?R/(RR4R@RB((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pytdo_namescCs|jS(N(R$(RR4((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pytdo_strsN(R1t __module__t__doc__RUtsystplatformt version_infotversiontsplittostnametstrRtreleasetmachineRR?RR R%R(R8R<RCRKRXRZR[R\(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRs@                      cCst|j|jS(s Interpret a marker and return a result depending on environment. :param marker: The marker to interpret. :type marker: str :param execution_context: The context used for name lookup. 
:type execution_context: mapping (RR8tstrip(tmarkertexecution_context((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRs (R^R-RdR_R`tcompatRRtutilRt__all__tobjectRRR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyts     PK.e[^00distlib/scripts.pycnu[ abc@sddlmZddlZddlZddlZddlZddlZddlmZm Z m Z ddl m Z ddl mZmZmZmZmZejeZdjZejdZd Zd Zd efd YZdS( i(tBytesIONi(t sysconfigtdetect_encodingtZipFile(tfinder(t FileOperatortget_export_entryt convert_pathtget_executabletin_venvs s^#!.*pythonw?[0-9.]*([ ].*)?$s|# -*- coding: utf-8 -*- if __name__ == '__main__': import sys, re def _resolve(module, func): __import__(module) mod = sys.modules[module] parts = func.split('.') result = getattr(mod, parts.pop(0)) for p in parts: result = getattr(result, p) return result try: sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) func = _resolve('%(module)s', '%(func)s') rc = func() # None interpreted as 0 except Exception as e: # only supporting Python >= 2.6 sys.stderr.write('%%s\n' %% e) rc = 1 sys.exit(rc) cCsd|kr|jdre|jdd\}}d|kr|jd rd||f}qq|jdsd|}qn|S(Nt s /usr/bin/env it"s%s "%s"s"%s"(t startswithtsplit(t executabletenvt _executable((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt_enquote_executableBs t ScriptMakercBseZdZeZdZeeddZ dZ e j j drZdZdZndddZdZeZd Zd Zdd Zd Zed ZejdZejdksejdkrejdkrdZnddZddZ RS(s_ A class to copy or create scripts from source scripts or callable specifications. 
cCs||_||_||_t|_t|_tjdkpWtjdkoWtjdk|_ t d|_ |p{t ||_ tjdkptjdkotjdk|_dS(NtposixtjavatsX.Ytnt(RsX.Y(t source_dirt target_dirt add_launcherstFalsetforcetclobbertostnamet_nametset_modetsettvariantsRt_fileopt_is_nt(tselfRRRtdry_runtfileop((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt__init__[s     cCsa|jdtr]|jr]tjj|\}}|jdd}tjj||}n|S(Ntguitpythontpythonw(tgetRR$RtpathR treplacetjoin(R%Rtoptionstdntfn((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt_get_alternate_executableks RcCs[y,t|}|jddkSWdQXWn(ttfk rVtjd|tSXdS(sl Determine if the specified executable is a script (contains a #! line) is#!NsFailed to open %s(topentreadtOSErrortIOErrortloggertwarningR(R%Rtfp((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt _is_shellss cCs^|j|r=ddl}|jjjddkrV|Sn|jjdrV|Sd|S(Nisos.nametLinuxs jython.exes/usr/bin/env %s(R;RtlangtSystemt getPropertytlowertendswith(R%RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt_fix_jython_executables RcCst}|jr!|j}t}ntjs9t}nqtrptjj tj ddtj d}n:tjj tj ddtj dtj df}|r|j ||}nt jjdr|j|}ntjj|}|rt|}n|jd}t jd krSd |krSd |krS|d 7}nd ||d}y|jdWn!tk rtd|nX|dkry|j|Wqtk rtd||fqXn|S(Ntscriptsspython%stEXEtBINDIRs python%s%stVERSIONRsutf-8tclis -X:Framess -X:FullFramess -X:Framess#!s s,The shebang (%r) is not decodable from utf-8s?The shebang (%r) is not decodable from the script encoding (%r)(tTrueRRRtis_python_buildRR RR-R/tget_pathtget_config_varR3tsystplatformR RBtnormcaseRtencodetdecodetUnicodeDecodeErrort ValueError(R%tencodingt post_interpR0tenquoteRtshebang((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt _get_shebangsL             cCs |jtd|jd|jS(Ntmoduletfunc(tscript_templatetdicttprefixtsuffix(R%tentry((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt_get_script_textscCstjj|}|j|S(N(RR-tbasenametmanifest(R%texenametbase((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt get_manifestscCs|jo|j}tjjd}|s;|||}n||dkrY|jd}n|jd}t} t| d} | 
jd|WdQX| j } |||| }x|D]} tj j |j | } |rtj j | \}}|jdr|} nd| } y|jj| |Wqltk rtjdd | }tj j|r|tj|ntj| ||jj| |tjd ytj|Wqtk rqXqlXn|jr| jd | rd | |f} ntj j| r:|j r:tjd | qn|jj| ||jrl|jj| gn|j| qWdS(Nsutf-8tpytttws __main__.pys.pys%s.exes:Failed to write executable - trying to use .deleteme logics %s.deletemes0Able to replace executable using .deleteme logict.s%s.%ssSkipping existing file %s(RR$RtlinesepROt _get_launcherRRtwritestrtgetvalueR-R/RtsplitextR R#twrite_binary_filet ExceptionR8R9texiststremovetrenametdebugRARR tset_executable_modetappend(R%tnamesRVt script_bytest filenamestextt use_launcherRitlaunchertstreamtzftzip_dataRtoutnametntetdfname((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt _write_scriptsT             c CsQd}|rL|jdg}|rLddj|}|jd}qLn|jd|d|}|j|jd}|j}t} d|jkr| j|nd|jkr| jd|t j d fnd |jkr | jd |t j d fn|r.|jd t r.d} nd} |j | |||| dS(NRtinterpreter_argss %sR sutf-8R0tXs%s%sisX.Ys%s-%siR)tpywRe( R,R/RORWR_RR!R"taddRLtversionRR( R%R^RxR0RTtargsRVtscriptRt scriptnamesRy((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt _make_scripts(  !! 
cCs@t}tjj|jt|}tjj|jtjj|}|j r||j j || r|t j d|dSyt |d}Wn&tk r|jsnd}noX|j}|st jd|j|dStj|jdd}|r&t}|jdp d}n|s|r?|jn|j j|||jrq|j j|gn|j|nt jd||j|j js)t|j\} } |j d |j!| |} d |krd } nd } tjj|} |j"| g| |j#|| n|r<|jndS( Nsnot copying %s (up-to-date)trbs"%s: %s is an empty file (skipping)s s iRscopying and adjusting %s -> %siR+RRe($RRR-R/RRRR`RR#tnewerR8RsR4R7R&tNonetreadlineR9tget_command_namet FIRST_LINE_REtmatchR.RHtgrouptcloset copy_fileR RtRutinfoRtseekRWRR5(R%RRxtadjustRtft first_lineRRTRStlinesRVRyR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt _copy_scriptsR$              %cCs |jjS(N(R#R&(R%((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyR&JscCs||j_dS(N(R#R&(R%tvalue((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyR&NsRcCsftjddkrd}nd}d||f}tjddd}t|j|j}|S( NtPit64t32s%s%s.exeRhii(tstructtcalcsizet__name__trsplitRtfindtbytes(R%tkindtbitsRtdistlib_packagetresult((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyRjVs cCsKg}t|}|dkr1|j||n|j||d||S(s Make a script. :param specification: The specification, which is either a valid export entry specification (to make a script from a callable) or a filename (to make a script by copying from a source location). :param options: A dictionary of options controlling script generation. :return: A list of all absolute pathnames written to. R0N(RRRR(R%t specificationR0RxR^((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pytmakeds   cCs4g}x'|D]}|j|j||q W|S(s Take a list of specifications and make scripts from them, :param specifications: A list of specifications. 
:return: A list of all absolute pathnames written to, (textendR(R%tspecificationsR0RxR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt make_multiplews N(!Rt __module__t__doc__tSCRIPT_TEMPLATERZRRRHRR(R3RLRMR R;RBRWR_t_DEFAULT_MANIFESTRaRdRRRtpropertyR&tsetterRRRRjRR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyRRs,  8   2  4-  (tioRtloggingRtreRRLtcompatRRRt resourcesRtutilRRRRR t getLoggerRR8tstripRtcompileRRRtobjectR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyts     (  PK.e[rjz/z/distlib/manifest.pycnu[ abc@sdZddlZddlZddlZddlZddlZddlmZddlm Z ddl m Z dgZ ej eZejdejZejd ejejBZejd Zdefd YZdS( su Class representing the list of files in a distribution. Equivalent to distutils.filelist, but fixes some problems. iNi(tDistlibException(tfsdecode(t convert_pathtManifests\\w* s#.*?(?= )| (?=$)icBseZdZd dZdZdZdZedZ dZ dZ dZ e d ed Ze d ed Ze d ed Zd ZRS(s~A list of files built by on exploring the filesystem and filtered by applying various patterns to what we find there. cCsYtjjtjj|p!tj|_|jtj|_d|_ t |_ dS(sd Initialise an instance. :param base: The base directory to explore under. N( tostpathtabspathtnormpathtgetcwdtbasetseptprefixtNonetallfilestsettfiles(tselfR ((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyt__init__*s- cCsddlm}m}m}g|_}|j}|g}|j}|j}x|r|}tj |} x| D]{} tj j || } tj| } | j } || r|jt | qu|| ru||  ru|| ququWqPWdS(smFind all files under the base and set ``allfiles`` to the absolute pathnames of files found. i(tS_ISREGtS_ISDIRtS_ISLNKN(tstatRRRR R tpoptappendRtlistdirRtjointst_modeR(RRRRR troottstackRtpushtnamestnametfullnameRtmode((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pytfindall9s"          cCsM|j|js-tjj|j|}n|jjtjj|dS(sz Add a file to the manifest. :param item: The pathname to add. This can be relative to the base. 
N( t startswithR RRRR RtaddR(Rtitem((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyR$TscCs"x|D]}|j|qWdS(s Add a list of files to the manifest. :param items: The pathnames to add. These can be relative to the base. N(R$(RtitemsR%((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pytadd_many^s csfdtj}|rgt}x'|D]}|tjj|q7W||O}ngtd|DD]}tjj|^q~S(s8 Return sorted files in directory order csj|j|tjd||jkrftjj|\}}|dksVt||ndS(Nsadd_dir added %stt/(R(R)(R$tloggertdebugR RRtsplittAssertionError(tdirstdtparentt_(tadd_dirR(s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyR2ls  css!|]}tjj|VqdS(N(RRR,(t.0R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pys {s(RRRRtdirnametsortedR(RtwantdirstresultR.tft path_tuple((R2Rs@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyR5gs   cCst|_g|_dS(sClear all collected files.N(RRR (R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pytclear}s cCs|j|\}}}}|dkrcx|D].}|j|dts.tjd|q.q.Wn|dkrx|D]}|j|dt}qvWn{|dkrxl|D].}|j|dtstjd|qqWn3|dkrx$|D]}|j|dt}qWn|dkr`x|D]1}|j|d |s(tjd ||q(q(Wn|d krx|D]}|j|d |}qsWn~|d kr|jdd |stjd |qnG|dkr|jdd |stjd|qntd|dS(sv Process a directive which either adds some files from ``allfiles`` to ``files``, or removes some files from ``files``. :param directive: The directive to process. 
This should be in a format compatible with distutils ``MANIFEST.in`` files: http://docs.python.org/distutils/sourcedist.html#commands tincludetanchorsno files found matching %rtexcludesglobal-includes3no files found matching %r anywhere in distributionsglobal-excludesrecursive-includeR s-no files found matching %r under directory %rsrecursive-excludetgrafts no directories found matching %rtprunes4no previously-included directories found matching %rsinvalid action %rN( t_parse_directivet_include_patterntTrueR*twarningt_exclude_patterntFalseR R(Rt directivetactiontpatternstthedirt dirpatterntpatterntfound((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pytprocess_directivesD                    c Cs{|j}t|dkrA|ddkrA|jddn|d}d}}}|dkrt|d krtd |ng|dD]}t|^q}n|dkrt|d krtd|nt|d}g|d D]}t|^q}nT|dkr[t|d krHtd|nt|d}ntd|||||fS(s Validate a directive. :param directive: The directive to validate. :return: A tuple of action, patterns, thedir, dir_patterns iiR;R=sglobal-includesglobal-excludesrecursive-includesrecursive-excludeR>R?is$%r expects ...is*%r expects ...s!%r expects a single sunknown action %r(R;R=sglobal-includesglobal-excludesrecursive-includesrecursive-excludeR>R?N(R;R=sglobal-includesglobal-exclude(srecursive-includesrecursive-exclude(R>R?(R,tlentinsertR RR(RRFtwordsRGRHRIt dir_patterntword((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyR@s:    & & cCszt}|j||||}|jdkr:|jnx9|jD].}|j|rD|jj|t}qDqDW|S(sSelect strings (presumably filenames) from 'self.files' that match 'pattern', a Unix-style wildcard (glob) pattern. Patterns are not quite the same as implemented by the 'fnmatch' module: '*' and '?' match non-special characters, where "special" is platform-dependent: slash on Unix; colon, slash, and backslash on DOS/Windows; and colon on Mac OS. If 'anchor' is true (the default), then the pattern match is more stringent: "*.py" will match "foo.py" but not "foo/bar.py". If 'anchor' is false, both of these will match. 
If 'prefix' is supplied, then only filenames starting with 'prefix' (itself a pattern) and ending with 'pattern', with anything in between them, will match. 'anchor' is ignored in this case. If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and 'pattern' is assumed to be either a string containing a regex or a regex object -- no translation is done, the regex is just compiled and used as-is. Selected strings will be added to self.files. Return True if files are found. N( REt_translate_patternR R R"tsearchRR$RB(RRKR<R tis_regexRLt pattern_reR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyRAs  cCsdt}|j||||}x?t|jD].}|j|r.|jj|t}q.q.W|S(stRemove strings (presumably filenames) from 'files' that match 'pattern'. Other parameters are the same as for 'include_pattern()', above. The list 'self.files' is modified in place. Return True if files are found. This API is public to allow e.g. exclusion of SCM subdirs, e.g. when packaging source distributions (RERStlistRRTtremoveRB(RRKR<R RURLRVR8((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyRD)s  c CsH|r)t|tr"tj|S|Sntd krY|jdjd\}}}n|r|j|}td kr|j|r|j|st qnd}tj t j j |jd} |d k rtdkr|jd} |j|t|  } nV|j|} | j|r<| j|sBt | t|t| t|!} t j} t jdkrd} ntdkrd| | j | d|f}q;|t|t|t|!}d || | | ||f}nC|r;tdkrd| |}q;d || |t|f}ntj|S(sTranslate a shell-like wildcard pattern to a compiled regular expression. Return the compiled regex. If 'is_regex' true, then 'pattern' is directly compiled to a regex (if it's a string) or just returned as-is (assumes it's a regex object). iiR1R(s\s\\t^s.*s%s%s%s%s.*%s%ss%s%s%s(ii(iiN(ii(ii(ii(t isinstancetstrtretcompilet_PYTHON_VERSIONt _glob_to_ret partitionR#tendswithR-tescapeRRRR R RNR ( RRKR<R RUtstartR1tendRVR t empty_patternt prefix_reR ((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyRS=sB   $ *!  $#   #  cCsStj|}tj}tjdkr0d}nd|}tjd||}|S(sTranslate a shell-like glob pattern to a regular expression. Return a string containing the regex. 
Differs from 'fnmatch.translate()' in that '*' does not match "special characters" (which are platform-specific). s\s\\\\s\1[^%s]s((? s       PK.e[^00distlib/scripts.pyonu[ abc@sddlmZddlZddlZddlZddlZddlZddlmZm Z m Z ddl m Z ddl mZmZmZmZmZejeZdjZejdZd Zd Zd efd YZdS( i(tBytesIONi(t sysconfigtdetect_encodingtZipFile(tfinder(t FileOperatortget_export_entryt convert_pathtget_executabletin_venvs s^#!.*pythonw?[0-9.]*([ ].*)?$s|# -*- coding: utf-8 -*- if __name__ == '__main__': import sys, re def _resolve(module, func): __import__(module) mod = sys.modules[module] parts = func.split('.') result = getattr(mod, parts.pop(0)) for p in parts: result = getattr(result, p) return result try: sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) func = _resolve('%(module)s', '%(func)s') rc = func() # None interpreted as 0 except Exception as e: # only supporting Python >= 2.6 sys.stderr.write('%%s\n' %% e) rc = 1 sys.exit(rc) cCsd|kr|jdre|jdd\}}d|kr|jd rd||f}qq|jdsd|}qn|S(Nt s /usr/bin/env it"s%s "%s"s"%s"(t startswithtsplit(t executabletenvt _executable((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt_enquote_executableBs t ScriptMakercBseZdZeZdZeeddZ dZ e j j drZdZdZndddZdZeZd Zd Zdd Zd Zed ZejdZejdksejdkrejdkrdZnddZddZ RS(s_ A class to copy or create scripts from source scripts or callable specifications. cCs||_||_||_t|_t|_tjdkpWtjdkoWtjdk|_ t d|_ |p{t ||_ tjdkptjdkotjdk|_dS(NtposixtjavatsX.Ytnt(RsX.Y(t source_dirt target_dirt add_launcherstFalsetforcetclobbertostnamet_nametset_modetsettvariantsRt_fileopt_is_nt(tselfRRRtdry_runtfileop((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt__init__[s     cCsa|jdtr]|jr]tjj|\}}|jdd}tjj||}n|S(Ntguitpythontpythonw(tgetRR$RtpathR treplacetjoin(R%Rtoptionstdntfn((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt_get_alternate_executableks RcCs[y,t|}|jddkSWdQXWn(ttfk rVtjd|tSXdS(sl Determine if the specified executable is a script (contains a #! 
line) is#!NsFailed to open %s(topentreadtOSErrortIOErrortloggertwarningR(R%Rtfp((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt _is_shellss cCs^|j|r=ddl}|jjjddkrV|Sn|jjdrV|Sd|S(Nisos.nametLinuxs jython.exes/usr/bin/env %s(R;RtlangtSystemt getPropertytlowertendswith(R%RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt_fix_jython_executables RcCst}|jr!|j}t}ntjs9t}nqtrptjj tj ddtj d}n:tjj tj ddtj dtj df}|r|j ||}nt jjdr|j|}ntjj|}|rt|}n|jd}t jd krSd |krSd |krS|d 7}nd ||d}y|jdWn!tk rtd|nX|dkry|j|Wqtk rtd||fqXn|S(Ntscriptsspython%stEXEtBINDIRs python%s%stVERSIONRsutf-8tclis -X:Framess -X:FullFramess -X:Framess#!s s,The shebang (%r) is not decodable from utf-8s?The shebang (%r) is not decodable from the script encoding (%r)(tTrueRRRtis_python_buildRR RR-R/tget_pathtget_config_varR3tsystplatformR RBtnormcaseRtencodetdecodetUnicodeDecodeErrort ValueError(R%tencodingt post_interpR0tenquoteRtshebang((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt _get_shebangsL             cCs |jtd|jd|jS(Ntmoduletfunc(tscript_templatetdicttprefixtsuffix(R%tentry((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt_get_script_textscCstjj|}|j|S(N(RR-tbasenametmanifest(R%texenametbase((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt get_manifestscCs|jo|j}tjjd}|s;|||}n||dkrY|jd}n|jd}t} t| d} | jd|WdQX| j } |||| }x|D]} tj j |j | } |rtj j | \}}|jdr|} nd| } y|jj| |Wqltk rtjdd | }tj j|r|tj|ntj| ||jj| |tjd ytj|Wqtk rqXqlXn|jr| jd | rd | |f} ntj j| r:|j r:tjd | qn|jj| ||jrl|jj| gn|j| qWdS(Nsutf-8tpytttws __main__.pys.pys%s.exes:Failed to write executable - trying to use .deleteme logics %s.deletemes0Able to replace executable using .deleteme logict.s%s.%ssSkipping existing file %s(RR$RtlinesepROt _get_launcherRRtwritestrtgetvalueR-R/RtsplitextR R#twrite_binary_filet ExceptionR8R9texiststremovetrenametdebugRARR tset_executable_modetappend(R%tnamesRVt script_bytest filenamestextt 
use_launcherRitlaunchertstreamtzftzip_dataRtoutnametntetdfname((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt _write_scriptsT             c CsQd}|rL|jdg}|rLddj|}|jd}qLn|jd|d|}|j|jd}|j}t} d|jkr| j|nd|jkr| jd|t j d fnd |jkr | jd |t j d fn|r.|jd t r.d} nd} |j | |||| dS(NRtinterpreter_argss %sR sutf-8R0tXs%s%sisX.Ys%s-%siR)tpywRe( R,R/RORWR_RR!R"taddRLtversionRR( R%R^RxR0RTtargsRVtscriptRt scriptnamesRy((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt _make_scripts(  !! cCs@t}tjj|jt|}tjj|jtjj|}|j r||j j || r|t j d|dSyt |d}Wn&tk r|jsnd}noX|j}|st jd|j|dStj|jdd}|r&t}|jdp d}n|s|r?|jn|j j|||jrq|j j|gn|j|nt jd||j|j js)t|j\} } |j d |j!| |} d |krd } nd } tjj|} |j"| g| |j#|| n|r<|jndS( Nsnot copying %s (up-to-date)trbs"%s: %s is an empty file (skipping)s s iRscopying and adjusting %s -> %siR+RRe($RRR-R/RRRR`RR#tnewerR8RsR4R7R&tNonetreadlineR9tget_command_namet FIRST_LINE_REtmatchR.RHtgrouptcloset copy_fileR RtRutinfoRtseekRWRR5(R%RRxtadjustRtft first_lineRRTRStlinesRVRyR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt _copy_scriptsR$              %cCs |jjS(N(R#R&(R%((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyR&JscCs||j_dS(N(R#R&(R%tvalue((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyR&NsRcCsftjddkrd}nd}d||f}tjddd}t|j|j}|S( NtPit64t32s%s%s.exeRhii(tstructtcalcsizet__name__trsplitRtfindtbytes(R%tkindtbitsRtdistlib_packagetresult((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyRjVs cCsKg}t|}|dkr1|j||n|j||d||S(s Make a script. :param specification: The specification, which is either a valid export entry specification (to make a script from a callable) or a filename (to make a script by copying from a source location). :param options: A dictionary of options controlling script generation. :return: A list of all absolute pathnames written to. 
R0N(RRRR(R%t specificationR0RxR^((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pytmakeds   cCs4g}x'|D]}|j|j||q W|S(s Take a list of specifications and make scripts from them, :param specifications: A list of specifications. :return: A list of all absolute pathnames written to, (textendR(R%tspecificationsR0RxR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyt make_multiplews N(!Rt __module__t__doc__tSCRIPT_TEMPLATERZRRRHRR(R3RLRMR R;RBRWR_t_DEFAULT_MANIFESTRaRdRRRtpropertyR&tsetterRRRRjRR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyRRs,  8   2  4-  (tioRtloggingRtreRRLtcompatRRRt resourcesRtutilRRRRR t getLoggerRR8tstripRtcompileRRRtobjectR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyts     (  PK.e[&Dffdistlib/version.pyonu[ abc @srdZddlZddlZddlmZddddd d d d gZejeZd e fd YZ de fdYZ de fdYZ ejdZdZeZde fdYZdZde fdYZejddfejddfejddfejddfejddfejd dfejd!d"fejd#d$fejd%d&fejd'd(ff Zejd)dfejd*dfejd+d"fejd!d"fejd,dffZejd-Zd.Zd/Zejd0ejZid1d26d1d36d4d56d1d66d7d86dd6dd"6Zd9Zde fd:YZde fd;YZ ejd<ejZ!d=Z"d>Z#d e fd?YZ$d e fd@YZ%dAe fdBYZ&ie&eeedC6e&ee dDdE6e&e#e%edF6Z'e'dCe'dG=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$s ^\d+(\.\d+)*$cCs ||kS(N((tvtctp((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pytWttcCs||kp||kS(N((R(R)R*((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR+YR,s<=cCs||kp||kS(N((R(R)R*((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR+ZR,s>=cCs ||kS(N((R(R)R*((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR+[R,s==cCs ||kS(N((R(R)R*((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR+\R,s===cCs||kp||kS(N((R(R)R*((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR+^R,s~=cCs ||kS(N((R(R)R*((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR+_R,s!=c Cs|jdkrtdn|j|_}|jj|}|s\td|n|jd}|dj|_|jj |_ g}|drg|dj dD]}|j^q}x|D]}|j j|}|s td||fn|j}|dp#d}|d }|j d r|dkr^td |n|d t}} 
|jj|s|j|qn|j|t}} |j||| fqWnt||_dS(NsPlease specify a version classs Not valid: %rR,iit,sInvalid %r in %rs~=is.*s==s!=s#'.*' not allowed for %r constraintsi(s==s!=(t version_classtNonet ValueErrorR Rtdist_retmatchtgroupstnametlowertkeytsplittcomp_retendswithtTruetnum_retFalsetappendttupleR( RRtmR5tclistR)t constraintstoptvntprefix((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRbs: ,     cCst|tr!|j|}nx|jD]\}}}|jj|}t|trmt||}n|sd||jjf}t |n||||s+t Sq+Wt S(s Check if the provided version matches the constraints. :param version: The version to match against this instance. :type version: String or :class:`Version` instance. s%r not implemented for %s( t isinstanceRR0Rt _operatorstgettgetattrR"R RR>R<(Rtversiontoperatort constraintRFtftmsg((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR4scCsJd}t|jdkrF|jdddkrF|jdd}n|S(Niis==s===(s==s===(R1tlenR(Rtresult((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt exact_versions,cCsGt|t|ks*|j|jkrCtd||fndS(Nscannot compare %s and %s(RR6R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRs*cCs/|j||j|jko.|j|jkS(N(RR8R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRs cCs|j| S(N(R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRscCst|jt|jS(N(R R8R(R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR!scCsd|jj|jfS(Ns%s(%r)(R"R R(R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR#scCs|jS(N(R(R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR$sN(R R R1R0tretcompileR3R:R=RHRR4R&RRRRRR!R#R$(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR'Ns,         %      sk^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?(\.(post)(\d+))?(\.(dev)(\d+))?(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$c CsK|j}tj|}|s4td|n|j}td|djdD}x0t|dkr|ddkr|d }qfW|dsd}nt|d}|dd!}|d d !}|d d !}|d }|dkrd}n|dt|df}|dkr.d}n|dt|df}|dkr]d}n|dt|df}|dkrd}nfg} xQ|jdD]@} | j rdt| f} n d| f} | j | qWt| }|s| 
r|rd}qd}n|s&d}n|s5d}n||||||fS(NsNot a valid version: %scss|]}t|VqdS(N(tint(t.0R(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pys sit.iiiiii i i i tatzt_tfinal(NN((NN((NN(((RXi(RY(RZ(R[( R tPEP440_VERSION_RER4RR5R@R9RPRUR1tisdigitR?( RRAR5tnumstepochtpretposttdevtlocalRtpart((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt _pep_440_keysT  #%                      cBsAeZdZdZedddddgZedZRS(sIA rational version. Good: 1.2 # equivalent to "1.2.0" 1.2.0 1.2a1 1.2.3a2 1.2.3b1 1.2.3c1 1.2.3.4 TODO: fill this out Bad: 1 # minimum two numbers 1.2a # release level must have a release serial 1.2.3b cCsQt|}tj|}|j}td|djdD|_|S(Ncss|]}t|VqdS(N(RU(RVR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pys siRW(t_normalized_keyR\R4R5R@R9t_release_clause(RRRQRAR5((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRs   &RXtbR)trcRbcstfdjDS(Nc3s(|]}|r|djkVqdS(iN(t PREREL_TAGS(RVtt(R(s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pys s(tanyR(R((Rs?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR%s(R R R RtsetRjR&R%(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRs cCsUt|}t|}||kr(tS|j|s;tSt|}||dkS(NRW(tstrR<t startswithR>RP(txtytn((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt _match_prefix"s    cBseZeZidd6dd6dd6dd6dd 6d d 6d d 6dd6ZdZdZdZdZdZ dZ dZ dZ dZ RS(t_match_compatibles~=t _match_ltR-t _match_gtR.t _match_les<=t _match_ges>=t _match_eqs==t_match_arbitrarys===t _match_nes!=cCsx|r"d|ko|jd}n|jd o:|jd}|rn|jjddd}|j|}n||fS(Nt+iii(RRR9R0(RRKRMRFt strip_localR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt _adjust_local<scCsj|j|||\}}||kr+tS|j}djg|D]}t|^qA}t|| S(NRW(R~R>RgtjoinRnRs(RRKRMRFtrelease_clausetitpfx((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRuJs   (cCsj|j|||\}}||kr+tS|j}djg|D]}t|^qA}t|| S(NRW(R~R>RgRRnRs(RRKRMRFRRR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRvRs   
(cCs%|j|||\}}||kS(N(R~(RRKRMRF((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRwZscCs%|j|||\}}||kS(N(R~(RRKRMRF((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRx^scCsC|j|||\}}|s0||k}nt||}|S(N(R~Rs(RRKRMRFRQ((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRybs cCst|t|kS(N(Rn(RRKRMRF((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRzjscCsD|j|||\}}|s0||k}nt|| }|S(N(R~Rs(RRKRMRFRQ((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR{ms cCs|j|||\}}||kr+tS||kr;tS|j}t|dkrc|d }ndjg|D]}t|^qp}t||S(NiiRW(R~R<R>RgRPRRnRs(RRKRMRFRRR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRtus    ((R R RR0RHR~RuRvRwRxRyRzR{Rt(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR-s&         s[.+-]$R,s^[.](\d)s0.\1s^[.-]s ^\((.*)\)$s\1s^v(ersion)?\s*(\d+)s\2s^r(ev)?\s*(\d+)s[.]{2,}RWs\b(alfa|apha)\btalphas\b(pre-alpha|prealpha)\bs pre.alphas \(beta\)$tbetas ^[:~._+-]+s [,*")([\]]s[~:+_ -]s\.$s (\d+(\.\d+)*)c Cs|jj}x&tD]\}}|j||}qW|sJd}ntj|}|snd}|}n|jdjd}g|D]}t|^q}x#t |dkr|j dqWt |dkr||j }nDdj g|dD]}t |^q||j }|d }dj g|D]}t |^qB}|j}|rx)tD]\}}|j||}qvWn|s|}n&d|krdnd}|||}t|sd}n|S( s Try to suggest a semantic form for a version for which _suggest_normalized_version couldn't come up with anything. s0.0.0iRWiRbt-R|N(R R7t _REPLACEMENTStsubt_NUMERIC_PREFIXR4R5R9RURPR?tendRRnt_SUFFIX_REPLACEMENTSt is_semverR1( RRQtpattreplRARFtsuffixRtsep((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt_suggest_semantic_versions:  : (    cCs yt||SWntk r%nX|j}xSd2d3d4d5d6d7d8d9d:d;d<d=d>d?d@fD]\}}|j||}qfWtjdd|}tjdd|}tjdd|}tjdd|}tjdd|}|jdr |d }ntjd!d|}tjd"d#|}tjd$d%|}tjd&d|}tjd'd(|}tjd)d(|}tjd*d |}tjd+d,|}tjd-d%|}tjd.d/|}tjd0d1|}yt|Wntk rdA}nX|S(BsSuggest a normalized version close to the given version string. If you have a version string that isn't rational (i.e. 
NormalizedVersion doesn't like it) then you might be able to get an equivalent (or close) rational version from this function. This does a number of simple normalizations to the given string, based on observation of versions currently in use on PyPI. Given a dump of those version during PyCon 2009, 4287 of them: - 2312 (53.93%) match NormalizedVersion without change with the automatic suggestion - 3474 (81.04%) match when using this suggestion method @param s {str} An irrational version string. @returns A rational version string, or None, if couldn't determine one. s-alphaRXs-betaRhRRRiR)s-finalR,s-pres-releases.releases-stableR|RWRZt s.finalR[spre$tpre0sdev$tdev0s([abc]|rc)[\-\.](\d+)$s\1\2s[\-\.](dev)[\-\.]?r?(\d+)$s.\1\2s[.~]?([abc])\.?s\1R(is\b0+(\d+)(?!\d)s (\d+[abc])$s\g<1>0s\.?(dev-r|dev\.r)\.?(\d+)$s.dev\2s-(a|b|c)(\d+)$s[\.\-](dev|devel)$s.dev0s(?![\.\-])dev$s(final|stable)$s\.?(r|-|-r)\.?(\d+)$s.post\2s\.?(dev|git|bzr)\.?(\d+)$s\.?(pre|preview|-c)(\d+)$sc\g<2>sp(\d+)$s.post\1(s-alphaRX(s-betaRh(RRX(RRh(RiR)(s-finalR,(s-preR)(s-releaseR,(s.releaseR,(s-stableR,(R|RW(RZRW(RR,(s.finalR,(R[R,N(RfRR7treplaceRSRRoR1(RtrstorigR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt_suggest_normalized_versionsH           s([a-z]+|\d+|[\.-])R)R`tpreviewsfinal-RRit@RbcCsd}g}x||D]}|jdr|dkrgx'|rc|ddkrc|jq@Wnx'|r|ddkr|jqjWn|j|qWt|S(NcSsg}xtj|jD]j}tj||}|rd|d koUdknrl|jd}n d|}|j|qqW|jd|S(Nt0it9it*s*final(t _VERSION_PARTR9R7t_VERSION_REPLACERItzfillR?(RRQR*((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt get_partsIs   Rs*finalis*final-t00000000(RotpopR?R@(RRRQR*((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt _legacy_keyHs  cBs eZdZedZRS(cCs t|S(N(R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRcscCsRt}xE|jD]:}t|tr|jdr|dkrt}PqqW|S(NRs*final(R>RRGRRoR<(RRQRp((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR%fs (R R 
RR&R%(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRbs cBs?eZeZeejZdedt numeric_reR4RntloggertwarningR<R5trsplitRs(RRKRMRFRAR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRtys    ( R R RR0tdictR'RHRSRTRRt(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRqs  sN^(\d+)\.(\d+)\.(\d+)(-[a-z0-9]+(\.[a-z0-9-]+)*)?(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$cCs tj|S(N(t _SEMVER_RER4(R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRsc Csd}t|}|s*t|n|j}g|d D]}t|^qA\}}}||dd||dd}} |||f|| fS(NcSsi|dkr|f}nM|djd}tg|D]'}|jrV|jdn|^q5}|S(NiRWi(R1R9R@R]R(RtabsentRQRR*((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt make_tuples   :it|iR(RRR5RU( RRRAR5RtmajortminortpatchR`tbuild((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt _semantic_keys  ,'cBs eZdZedZRS(cCs t|S(N(R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRscCs|jdddkS(NiiR(R(R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR%s(R R RR&R%(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRs cBseZeZRS((R R RR0(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRst VersionSchemecBs8eZddZdZdZdZdZRS(cCs||_||_||_dS(N(R8tmatchert suggester(RR8RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRs  cCs8y|jj|t}Wntk r3t}nX|S(N(RR0R<RR>(RRRQ((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pytis_valid_versions    cCs5y|j|t}Wntk r0t}nX|S(N(RR<RR>(RRRQ((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pytis_valid_matchers     cCs|jd|S(s: Used for processing some metadata fields sdummy_name (%s)(R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pytis_valid_constraint_listscCs+|jdkrd}n|j|}|S(N(RR1(RRRQ((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pytsuggests N(R R R1RRRRR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRs     t 
normalizedcCs|S(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR+R,tlegacytsemantictdefaultcCs'|tkrtd|nt|S(Nsunknown scheme name: %r(t_SCHEMESR2(R6((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRs ()R tloggingRStcompatRt__all__t getLoggerR RR2RtobjectR R'RTR\ReRfRRsRRRRRRtIRR1RRRRRRRRRRRR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt s~   1k =$ W  . r       #    PK.e[~==distlib/__init__.pycnu[ abc@sddlZdZdefdYZyddlmZWn*ek rhdejfdYZnXejeZ e j edS(iNs0.2.4tDistlibExceptioncBseZRS((t__name__t __module__(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pyR s(t NullHandlerRcBs#eZdZdZdZRS(cCsdS(N((tselftrecord((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pythandletcCsdS(N((RR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pytemitRcCs d|_dS(N(tNonetlock(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pyt createLockR(RRRRR (((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pyRs  ( tloggingt __version__t ExceptionRRt ImportErrortHandlert getLoggerRtloggert addHandler(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pyts  PK.e[46cycydistlib/wheel.pyonu[ abc@sddlmZddlZddlZddlZddlZddlmZddl Z ddl Z ddl Z ddl Z ddl Z ddlZddlZddlZddlZddlZddlZddlmZmZddlmZmZmZmZmZddlmZddlm Z m!Z!dd l"m#Z#m$Z$m%Z%m&Z&m'Z'm(Z(m)Z)m*Z*m+Z+dd l,m-Z-m.Z.e j/e0Z1e2a3e4ed rd Z5n9ej6j7d rdZ5nej6dkrdZ5ndZ5ej8dZ9e9 rdej:d Z9nde9Z;e5e9Z<ej"j=j>ddj>ddZ?ej8dZ@e@oze@j7dre@j>ddZ@ndZAeAZ@[AejBdejCejDBZEejBdejCejDBZFejBdZGejBdZHd ZId!ZJe jKd"kr$d#ZLn d$ZLd%eMfd&YZNeNZOd'eMfd(YZPd)ZQeQZR[Qe2d*ZSdS(+i(tunicode_literalsN(tmessage_from_filei(t __version__tDistlibException(t sysconfigtZipFiletfsdecodet text_typetfilter(tInstalledDistribution(tMetadatatMETADATA_FILENAME( t FileOperatort convert_patht CSVReadert CSVWritertCachetcached_propertytget_cache_baset 
read_exportsttempdir(tNormalizedVersiontUnsupportedVersionErrorupypy_version_infouppujavaujyucliuipucpupy_version_nodotu%s%siupyu-u_u.uSOABIucpython-cCs|dtg}tjdr+|jdntjdrJ|jdntjddkro|jdnd j|S( NucpuPy_DEBUGudu WITH_PYMALLOCumuPy_UNICODE_SIZEiuuu(t VER_SUFFIXRtget_config_vartappendtjoin(tparts((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt _derive_abi;s uz (?P[^-]+) -(?P\d+[^-]*) (-(?P\d+[^-]*))? -(?P\w+\d+(\.\w+\d+)*) -(?P\w+) -(?P\w+(\.\w+)*) \.whl$ u7 (?P[^-]+) -(?P\d+[^-]*) (-(?P\d+[^-]*))?$ s \s*#![^\r\n]*s^(\s*#!("[^"]+"|\S+))\s+(.*)$s#!pythons #!pythonwu/cCs|S(N((to((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt]tcCs|jtjdS(Nu/(treplacetostsep(R((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR_RtMountercBs8eZdZdZdZddZdZRS(cCsi|_i|_dS(N(t impure_wheelstlibs(tself((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt__init__cs cCs!||j|<|jj|dS(N(R$R%tupdate(R&tpathnamet extensions((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytaddgs cCsI|jj|}x0|D](\}}||jkr|j|=qqWdS(N(R$tpopR%(R&R)R*tktv((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytremovekscCs"||jkr|}nd}|S(N(R%tNone(R&tfullnametpathtresult((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt find_moduleqs cCs|tjkrtj|}nx||jkrAtd|ntj||j|}||_|jdd}t|dkr|d|_ n|S(Nuunable to find extension for %su.ii( tsystmodulesR%t ImportErrortimpt load_dynamict __loader__trsplittlent __package__(R&R1R3R((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt load_modulexs N(t__name__t __module__R'R+R/R0R4R>(((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR#bs     tWheelcBseZdZdZdZdeedZedZ edZ edZ e dZ dZe d Zd Zdd Zd Zd ZdZdddZdZdZdZdZdZedZdZdZddZRS(u@ Class to build and install from Wheel files (PEP 427). 
iusha256cCs||_||_d|_tg|_dg|_dg|_tj|_ |dkr{d|_ d|_ |j |_nEtj|}|r|jd}|d|_ |djdd |_ |d |_|j |_ntjj|\}}tj|}|s!td |n|r?tjj||_ n||_|jd}|d|_ |d|_ |d |_|d jd |_|djd |_|djd |_dS(uB Initialise an instance using a (valid) filename. uunoneuanyudummyu0.1unmuvnu_u-ubnuInvalid name or filename: %rupyu.ubiuarN(tsignt should_verifytbuildvertPYVERtpyvertabitarchR!tgetcwdtdirnameR0tnametversiontfilenamet _filenametNAME_VERSION_REtmatcht groupdictR R2tsplitt FILENAME_RERtabspath(R&RMRBtverifytmtinfoRJ((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR'sB                cCs|jrd|j}nd}dj|j}dj|j}dj|j}|jjdd}d|j|||||fS(uJ Build and return a filename from the various components. u-uu.u_u%s-%s%s-%s-%s-%s.whl(RDRRFRGRHRLR RK(R&RDRFRGRHRL((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyRMs cCs+tjj|j|j}tjj|S(N(R!R2RRJRMtisfile(R&R2((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytexistssccsNxG|jD]<}x3|jD](}x|jD]}|||fVq*WqWq WdS(N(RFRGRH(R&RFRGRH((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyttagssc Cs8tjj|j|j}d|j|jf}d|}tjd}t |d}|j |}|dj dd}t g|D]}t |^q} | d krd} nt} yItj|| } |j| "} || } td | }WdQXWn!tk r-td | nXWdQX|S( Nu%s-%su %s.dist-infouutf-8uru Wheel-Versionu.iuMETADATAtfileobju$Invalid wheel, because %s is missing(ii(R!R2RRJRMRKRLtcodecst getreaderRtget_wheel_metadataRRttupletintR t posixpathtopenR tKeyErrort ValueError(R&R)tname_vertinfo_dirtwrappertzftwheel_metadatatwvtit file_versiontfntmetadata_filenametbftwfR3((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytmetadatas( %    cCsud|j|jf}d|}tj|d}|j|(}tjd|}t|}WdQXt|S(Nu%s-%su %s.dist-infouWHEELuutf-8( RKRLRaRRbR\R]Rtdict(R&RhReRfRnRoRptmessage((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR^s cCsFtjj|j|j}t|d}|j|}WdQX|S(Nur(R!R2RRJRMRR^(R&R)RhR3((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyRWsc Cstj|}|r|j}|| ||}}d|jkrQt}nt}tj|}|rd|jd}nd}||}||}ns|jd}|jd} |dks|| krd} n&|||d!d krd } nd} t| |}|S( Ntpythonwt iRs s iis ( t 
SHEBANG_RERPtendtlowertSHEBANG_PYTHONWtSHEBANG_PYTHONtSHEBANG_DETAIL_REtgroupstfind( R&tdataRVRwtshebangtdata_after_shebangtshebang_pythontargstcrtlftterm((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytprocess_shebangs,      cCs|dkr|j}nytt|}Wn!tk rNtd|nX||j}tj|j dj d}||fS(NuUnsupported hash algorithm: %rt=uascii( R0t hash_kindtgetattrthashlibtAttributeErrorRtdigesttbase64turlsafe_b64encodetrstriptdecode(R&R~RthasherR3((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytget_hashs   !cCs~t|}ttjj||}|j|ddf|jt|%}x|D]}|j|q]WWdQXdS(Nu( tlisttto_posixR!R2trelpathRtsortRtwriterow(R&trecordst record_pathtbasetptwritertrow((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt write_record's   cCsg}|\}}tt|j}xs|D]k\}} t| d} | j} WdQXd|j| } tjj| } |j || | fq+Wtjj |d} |j || |t tjj |d}|j || fdS(Nurbu%s=%suRECORD( RRRRbtreadRR!R2tgetsizeRRRR(R&RWtlibdirt archive_pathsRtdistinfoRfRtapRtfR~Rtsize((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt write_records0s c Cs\t|dtjA}x7|D]/\}}tjd|||j||qWWdQXdS(NuwuWrote %s to %s in wheel(Rtzipfilet ZIP_DEFLATEDtloggertdebugtwrite(R&R)RRhRR((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt build_zip@sc! s|dkri}nttfdd#d}|dkrgd}tg}tg}tg}n!d}tg}dg}dg}|jd ||_|jd ||_ |jd ||_ |} d |j |j f} d | } d| } g} xKd$D]C}|kr qn|}t jj|rx t j|D]\}}}x|D]}tt jj||}t jj||}tt jj| ||}| j||f|dkrb|jd rbt|d}|j}WdQX|j|}t|d}|j|WdQXqbqbWqLWqqW| }d}xt j|D]\}}}||krxXt|D]G\}}t|}|jdrt jj||}||=PqqWnxl|D]d}t|jd%r qnt jj||}tt jj||}| j||fqWqkWt j|}xf|D]^}|d&krjtt jj||}tt jj| |}| j||fqjqjWd|p|jdtd|g}x4|jD])\}}}|jd |||fqWt jj|d}t|d!}|jd"j|WdQXtt jj| d}| j||f|j || f| | t jj|j!|j"} |j#| | | S('u Build a wheel from files in specified paths, and use any specified tags when determining the name of the wheel. 
cs |kS(N((R(tpaths(s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyRNRupurelibuplatlibiufalseutrueunoneuanyupyveruabiuarchu%s-%su%s.datau %s.dist-infoudatauheadersuscriptsu.exeurbNuwbu .dist-infou.pycu.pyouRECORDu INSTALLERuSHAREDuWHEELuWheel-Version: %d.%duGenerator: distlib %suRoot-Is-Purelib: %su Tag: %s-%s-%suwu (upurelibuplatlib(udatauheadersuscripts(u.pycu.pyo(uRECORDu INSTALLERuSHAREDuWHEEL($R0RRtIMPVERtABItARCHREtgetRFRGRHRKRLR!R2tisdirtwalkRRRRRtendswithRbRRRt enumeratetlistdirt wheel_versionRRZRRJRMR(!R&RRZRtlibkeytis_puret default_pyvert default_abit default_archRRetdata_dirRfRtkeyR2troottdirstfilesRmRtrpRRR~RRktdnRiRFRGRHR)((Rs=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytbuildFs  "              %      cCKs |j}|jd}|jdt}tjj|j|j}d|j|j f}d|} d|} t j| t } t j| d} t j| d} t j d}t|d }|j| }||}t|}Wd QX|d jd d }tg|D]}t|^q}||jkrY|rY||j|n|ddkrv|d}n |d}i}|j| D}td|,}x"|D]}|d}||||jd.}6|6r|6jd/}6nWd QXWnt1k rt+j2d0nX|6r|6jd1i}>|6jd2i}?|>s|?r|jdd}@tjj?|@st@d3n|@|_xF|>jAD]8\}:}<d4|:|<f}A|j4|A}4|j5|4q(W|?ritd(6}BxL|?jAD];\}:}<d4|:|<f}A|j4|A|B}4|j5|4qWqqntjj|| }tB|}5tC|}|d=|d=||d5<|5jD||}|r9 |!j/|n|5jE|!|d6||5SWn+t1k r t+jFd7|jGnXWd tHjI|"XWd QXd S(9u Install a wheel to the specified paths. If kwarg ``warner`` is specified, it should be a callable, which will be called with two tuples indicating the wheel version of this software and the wheel version in the file, if there is a discrepancy in the versions. This can be used to issue any warnings to raise any exceptions. If kwarg ``lib_only`` is True, only the purelib/platlib files are installed, and the headers, scripts, data and dist-info metadata are not written. The return value is a :class:`InstalledDistribution` instance unless ``options.lib_only`` is True, in which case the return value is ``None``. 
uwarnerulib_onlyu%s-%su%s.datau %s.dist-infouWHEELuRECORDuutf-8urNu Wheel-Versionu.iuRoot-Is-Purelibutrueupurelibuplatlibtstreamiuuscriptstdry_runu /RECORD.jwsiusize mismatch for %su=udigest mismatch for %sulib_only: skipping %su.exeu/urbudigest mismatch on write for %su.pyuByte-compilation failedtexc_infoulib_only: returning Noneu1.0uentry_points.txtuconsoleuguiu %s_scriptsuwrap_%su%s:%su %suAUnable to read legacy script metadata, so cannot generate scriptsu extensionsupython.commandsu8Unable to read JSON metadata, so cannot generate scriptsu wrap_consoleuwrap_guiuValid script path not specifiedu%s = %sulibuprefixuinstallation failed.(uconsoleugui(JRRtFalseR!R2RRJRMRKRLRaR R\R]RRbRRRR_R`RRR tTruetrecordR5tdont_write_bytecodettempfiletmkdtempt source_dirR0t target_dirtinfolistt isinstanceRRRtstrt file_sizeRRRt startswithRRR t copy_streamRt byte_compilet Exceptiontwarningtbasenametmaketset_executable_modetextendRWRtvaluestprefixtsuffixtflagstjsontloadRRdtitemsR Rrtwrite_shared_locationstwrite_installed_filest exceptiontrollbacktshutiltrmtree(CR&RtmakertkwargsRtwarnertlib_onlyR)ReRRft metadata_nametwheel_metadata_namet record_nameRgRhtbwfRpRsRjRkRlRRRotreaderRRtdata_pfxtinfo_pfxt script_pfxtfileoptbctoutfilestworkdirtzinfotarcnamet u_arcnametkindtvalueR~t_Rt is_scripttwhereRtoutfilet newdigesttpycRmtworknameRt filenamestdisttcommandsteptepdataRR-tdR.tstconsole_scriptst gui_scriptst script_dirtscripttoptions((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytinstallsD    %            #   "                            cCsGtdkrCtjjttdtjd }t |antS(Nu dylib-cachei( tcacheR0R!R2RRRR5RLR(R&R((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt_get_dylib_caches  c Cstjj|j|j}d|j|jf}d|}tj|d}tj d}g}t |dw}y\|j |G}||} t j | } |j} | j|} tjj| j| } tjj| stj| nx| jD]\}}tjj| t|}tjj|sHt}nQtj|j}tjj|}|j|}tj|j}||k}|r|j|| n|j||fqWWdQXWntk rnXWdQX|S(Nu%s-%su %s.dist-infou EXTENSIONSuutf-8ur( R!R2RRJRMRKRLRaR\R]RRbRRRt prefix_to_dirRRtmakedirsRR 
RYRtstattst_mtimetdatetimet fromtimestamptgetinfot date_timetextractRRc(R&R)ReRfRRgR3RhRoRpR*RRt cache_baseRKRtdestRt file_timeRWt wheel_time((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt_get_extensionss>     !  cCs t|S(uM Determine if a wheel is compatible with the running system. (t is_compatible(R&((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR$scCstS(uP Determine if a wheel is asserted as mountable by its metadata. (R(R&((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt is_mountablescCs tjjtjj|j|j}|jsLd|}t|n|jsqd|}t|n|t jkrt j d|ns|rt jj |nt jj d||j}|rtt jkrt jj tntj||ndS(Nu)Wheel %s not compatible with this Python.u$Wheel %s is marked as not mountable.u%s already in pathi(R!R2RTRRJRMR$RR%R5RRRtinsertR#t_hookt meta_pathR+(R&RR)tmsgR*((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytmounts"'     cCstjjtjj|j|j}|tjkrItjd|n]tjj ||t j krxt j |nt j st tj krtj j t qndS(Nu%s not in path( R!R2RTRRJRMR5RRR/R'R$R((R&R)((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytunmounts' cCstjj|j|j}d|j|jf}d|}d|}tj|t}tj|d}tj|d}t j d}t |d } | j |} || } t | } WdQX| djd d } tg| D]}t|^q}i}| j |D}td |,}x"|D]}|d }|||Fsu0Cannot update non-compliant (PEP-440) version %rR2tlegacyuVersion updated from %r to %r(R0RR}RRR`RRRRR RLRR R( RLR2tupdatedR.RkR RtmdR/((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytupdate_version;s(   0 !     
u%s-%su %s.dist-infouRECORDuruutf-8u..uinvalid entry in wheel: %rNRu.whlRu wheel-update-tdiruNot a directory: %r(R!R2RRJRMRKRLRaRRRRRRRRR R0RtmkstemptcloseRRRRRRtcopyfile(R&tmodifiertdest_dirRR-R2R)ReRfRRRhR,RRRR2toriginal_versionRtmodifiedtcurrent_versiontfdtnewpathRRRW((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR( sX           (iiN(R?R@t__doc__RRR0RR'tpropertyRMRYRZRRqR^RWRRRRRRRRR#R$R%R*R+RUR((((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyRAs2)    h "    6cCstg}td}xGttjddddD](}|jdj|t|gq1Wg}xLtjD]>\}}}|j drp|j|j dddqpqpW|j t dkr|j dt n|jdg}tg}tjd kr=tjd t}|r=|j\} }}} t|}| g} | dkrg| jd n| dkr| jdn| dkr| jdn| dkr| jdn| dkr| jdnx`|dkr6x@| D]8} d| ||| f} | tkr|j| qqW|d8}qWq=nxH|D]@}x7|D]/} |jdjt|df|| fqQWqDWxwt|D]i\}}|jdjt|fddf|dkr|jdjt|dfddfqqWxwt|D]i\}}|jdjd|fddf|dkr|jdjd|dfddfqqWt|S(uG Return (pyver, abi, arch) tuples compatible with this Python. iiiuu.abiu.iunoneudarwinu(\w+)_(\d+)_(\d+)_(\w+)$ui386uppcufatux86_64ufat3uppc64ufat64uintelu universalu %s_%s_%s_%suanyupy(ui386uppc(ui386uppcux86_64(uppc64ux86_64(ui386ux86_64(ui386ux86_64uinteluppcuppc64(RtrangeR5t version_infoRRRR8t get_suffixesRRRRRR&RtplatformtreRPR|R`t IMP_PREFIXRtset(tversionstmajortminortabisRRR3tarchesRVRKRHtmatchesRPR RGRkRL((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytcompatible_tagss`  $&$               1% 0% 0cCst|tst|}nt}|dkr9t}nxN|D]F\}}}||jkr@||jkr@||jkr@t}Pq@q@W|S(N( RRARR0tCOMPATIBLE_TAGSRFRGRHR(twheelRZR3tverRGRH((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR$s  -(Tt __future__RRR\Rtdistutils.utilt distutilstemailRRR8RtloggingR!RaRDRR5RRRRRtcompatRRRRRtdatabaseR RqR R tutilR R RRRRRRRRLRRt getLoggerR?RR0RthasattrRERCRRRRARERt get_platformR RRRtcompilet IGNORECASEtVERBOSERSRORvR{RzRyR"RtobjectR#R'RARMRNR$(((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyts               (@     '   #  > PK.e[إ..distlib/manifest.pyonu[ abc@sdZddlZddlZddlZddlZddlZddlmZddlm Z ddl m Z dgZ ej eZejdejZejd ejejBZejd 
Zdefd YZdS( su Class representing the list of files in a distribution. Equivalent to distutils.filelist, but fixes some problems. iNi(tDistlibException(tfsdecode(t convert_pathtManifests\\w* s#.*?(?= )| (?=$)icBseZdZd dZdZdZdZedZ dZ dZ dZ e d ed Ze d ed Ze d ed Zd ZRS(s~A list of files built by on exploring the filesystem and filtered by applying various patterns to what we find there. cCsYtjjtjj|p!tj|_|jtj|_d|_ t |_ dS(sd Initialise an instance. :param base: The base directory to explore under. N( tostpathtabspathtnormpathtgetcwdtbasetseptprefixtNonetallfilestsettfiles(tselfR ((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyt__init__*s- cCsddlm}m}m}g|_}|j}|g}|j}|j}x|r|}tj |} x| D]{} tj j || } tj| } | j } || r|jt | qu|| ru||  ru|| ququWqPWdS(smFind all files under the base and set ``allfiles`` to the absolute pathnames of files found. i(tS_ISREGtS_ISDIRtS_ISLNKN(tstatRRRR R tpoptappendRtlistdirRtjointst_modeR(RRRRR troottstackRtpushtnamestnametfullnameRtmode((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pytfindall9s"          cCsM|j|js-tjj|j|}n|jjtjj|dS(sz Add a file to the manifest. :param item: The pathname to add. This can be relative to the base. N( t startswithR RRRR RtaddR(Rtitem((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyR$TscCs"x|D]}|j|qWdS(s Add a list of files to the manifest. :param items: The pathnames to add. These can be relative to the base. 
N(R$(RtitemsR%((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pytadd_many^s csfdtj}|rgt}x'|D]}|tjj|q7W||O}ngtd|DD]}tjj|^q~S(s8 Return sorted files in directory order csX|j|tjd||jkrTtjj|\}}||ndS(Nsadd_dir added %s(R$tloggertdebugR RRtsplit(tdirstdtparentt_(tadd_dirR(s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyR/ls  css!|]}tjj|VqdS(N(RRR*(t.0R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pys {s(RRRRtdirnametsortedR(RtwantdirstresultR+tft path_tuple((R/Rs@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyR2gs   cCst|_g|_dS(sClear all collected files.N(RRR (R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pytclear}s cCs|j|\}}}}|dkrcx|D].}|j|dts.tjd|q.q.Wn|dkrx|D]}|j|dt}qvWn{|dkrxl|D].}|j|dtstjd|qqWn3|dkrx$|D]}|j|dt}qWn|dkr`x|D]1}|j|d |s(tjd ||q(q(Wn|d krx|D]}|j|d |}qsWn~|d kr|jdd |stjd |qnG|dkr|jdd |stjd|qntd|dS(sv Process a directive which either adds some files from ``allfiles`` to ``files``, or removes some files from ``files``. :param directive: The directive to process. This should be in a format compatible with distutils ``MANIFEST.in`` files: http://docs.python.org/distutils/sourcedist.html#commands tincludetanchorsno files found matching %rtexcludesglobal-includes3no files found matching %r anywhere in distributionsglobal-excludesrecursive-includeR s-no files found matching %r under directory %rsrecursive-excludetgrafts no directories found matching %rtprunes4no previously-included directories found matching %rsinvalid action %rN( t_parse_directivet_include_patterntTrueR(twarningt_exclude_patterntFalseR R(Rt directivetactiontpatternstthedirt dirpatterntpatterntfound((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pytprocess_directivesD                    c Cs{|j}t|dkrA|ddkrA|jddn|d}d}}}|dkrt|d krtd |ng|dD]}t|^q}n|dkrt|d krtd|nt|d}g|d D]}t|^q}nT|dkr[t|d krHtd|nt|d}ntd|||||fS(s Validate a directive. :param directive: The directive to validate. 
:return: A tuple of action, patterns, thedir, dir_patterns iiR8R:sglobal-includesglobal-excludesrecursive-includesrecursive-excludeR;R<is$%r expects ...is*%r expects ...s!%r expects a single sunknown action %r(R8R:sglobal-includesglobal-excludesrecursive-includesrecursive-excludeR;R<N(R8R:sglobal-includesglobal-exclude(srecursive-includesrecursive-exclude(R;R<(R*tlentinsertR RR(RRCtwordsRDRERFt dir_patterntword((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyR=s:    & & cCszt}|j||||}|jdkr:|jnx9|jD].}|j|rD|jj|t}qDqDW|S(sSelect strings (presumably filenames) from 'self.files' that match 'pattern', a Unix-style wildcard (glob) pattern. Patterns are not quite the same as implemented by the 'fnmatch' module: '*' and '?' match non-special characters, where "special" is platform-dependent: slash on Unix; colon, slash, and backslash on DOS/Windows; and colon on Mac OS. If 'anchor' is true (the default), then the pattern match is more stringent: "*.py" will match "foo.py" but not "foo/bar.py". If 'anchor' is false, both of these will match. If 'prefix' is supplied, then only filenames starting with 'prefix' (itself a pattern) and ending with 'pattern', with anything in between them, will match. 'anchor' is ignored in this case. If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and 'pattern' is assumed to be either a string containing a regex or a regex object -- no translation is done, the regex is just compiled and used as-is. Selected strings will be added to self.files. Return True if files are found. N( RBt_translate_patternR R R"tsearchRR$R?(RRHR9R tis_regexRIt pattern_reR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyR>s  cCsdt}|j||||}x?t|jD].}|j|r.|jj|t}q.q.W|S(stRemove strings (presumably filenames) from 'files' that match 'pattern'. Other parameters are the same as for 'include_pattern()', above. The list 'self.files' is modified in place. Return True if files are found. This API is public to allow e.g. 
exclusion of SCM subdirs, e.g. when packaging source distributions (RBRPtlistRRQtremoveR?(RRHR9R RRRIRSR5((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyRA)s  c Cs|r)t|tr"tj|S|Sntd krY|jdjd\}}}n|r|j|}td krqnd}tjtj j |j d} |d k rtdkr|jd} |j|t |  } n2|j|} | t |t | t |!} tj} tjdkr>d} ntdkrnd| | j | d|f}q|t |t |t |!}d || | | ||f}nC|rtdkrd| |}qd || |t |f}ntj|S(sTranslate a shell-like wildcard pattern to a compiled regular expression. Return the compiled regex. If 'is_regex' true, then 'pattern' is directly compiled to a regex (if it's a string) or just returned as-is (assumes it's a regex object). iiR.ts\s\\t^s.*s%s%s%s%s.*%s%ss%s%s%s(ii(iiN(ii(ii(ii(t isinstancetstrtretcompilet_PYTHON_VERSIONt _glob_to_ret partitiontescapeRRRR R RKR ( RRHR9R RRtstartR.tendRSR t empty_patternt prefix_reR ((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyRP=s@   $ !  #   #  cCsStj|}tj}tjdkr0d}nd|}tjd||}|S(sTranslate a shell-like glob pattern to a regular expression. Return a string containing the regex. Differs from 'fnmatch.translate()' in that '*' does not match "special characters" (which are platform-specific). s\s\\\\s\1[^%s]s((?RARPR](((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyR%s      O / (  6(RjRdtloggingRRZtsysRVRtcompatRtutilRt__all__t getLoggerRhR(R[tMt_COLLAPSE_PATTERNtSt_COMMENTED_LINEt version_infoR\tobjectR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyt s       PK.e[bGdistlib/markers.pycnu[ abc@sdZddlZddlZddlZddlZddlmZmZddlm Z dgZ de fdYZ dd ZdS( sEParser for the environment markers micro-language defined in PEP 345.iNi(tpython_implementationt string_types(tin_venvt interprett EvaluatorcBs^eZdZi dd6dd6dd6dd6d d 6d d 6d d6dd6dd6Zi ejd6dejd d6ejjdddd6e j d6e e d6ej d6ejd6ejd6ed 6Zd,d!Zd"Zd#Zd,d$Zd%Zd&Zd'Zd(Zd)Zd*Zd+ZRS(-s5 A limited evaluator for Python expressions. 
cCs ||kS(N((txty((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyttteqcCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRRtgtcCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRRtgtecCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRRtincCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRRtltcCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRRtltecCs| S(N((R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyR RtnotcCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyR!RtnoteqcCs ||kS(N((RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyR"Rtnotint sys_platforms%s.%sitpython_versiont iitpython_full_versiontos_nametplatform_in_venvtplatform_releasetplatform_versiontplatform_machinetplatform_python_implementationcCs|p i|_d|_dS(su Initialise an instance. :param context: If specified, names are looked up in this mapping. N(tcontexttNonetsource(tselfR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt__init__3scCsHd}d|j|||!}||t|jkrD|d7}n|S(sH Get the part of the source which is causing a problem. i s%rs...(Rtlen(Rtoffsett fragment_lents((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt get_fragment<s  cCst|d|dS(s@ Get a handler for the specified AST node type. 
sdo_%sN(tgetattrR(Rt node_type((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt get_handlerFscCst|tr||_idd6}|r8||dRtallowed_valuesR/(RR4tvalidtkeytresult((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt do_attributejs  cCs|j|jd}|jjtjk}|jjtjk}|sR|sRt|r^|sk|r| rxD|jdD]2}|j|}|r|s|ry| ryPqyqyWn|S(Nii(R8tvaluestopR0R-tOrtAndR:(RR4RDtis_ortis_andtn((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt do_boolopxs c sfd}j}j|}t}xtjjD]\}}||||jjj}|j krt d|nj|}j |||}|sPn|}|}qFW|S(Ncsbt}t|tjr3t|tjr3t}n|s^jj}td|ndS(NsInvalid comparison: %s(tTrueR,R-tStrR@R%R3R/(tlhsnodetrhsnodeRBR$(R4R(s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt sanity_checks $ sunsupported operation: %r( tleftR8RNtziptopst comparatorsR0R1R2t operatorsR/( RR4RRRPtlhsRDRGRQtrhs((R4Rs?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt do_compares  "  cCs|j|jS(N(R8tbody(RR4((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyt do_expressionscCs|t}|j|jkr1t}|j|j}n+|j|jkr\t}|j|j}n|sxtd|jn|S(Nsinvalid expression: %s(R@R<RRNRAR/(RR4RBRD((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pytdo_namescCs|jS(N(R$(RR4((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pytdo_strsN(R1t __module__t__doc__RWtsystplatformt version_infotversiontsplittostnametstrRtreleasetmachineRRARR R%R(R8R>RERMRZR\R]R^(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRs@                      cCst|j|jS(s Interpret a marker and return a result depending on environment. :param marker: The marker to interpret. :type marker: str :param execution_context: The context used for name lookup. 
:type execution_context: mapping (RR8tstrip(tmarkertexecution_context((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyRs (R`R-RfRaRbtcompatRRtutilRt__all__tobjectRRR(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyts     PK.e[* distlib/compat.pyonu[ abc@@sddlmZddlZddlZddlZyddlZWnek r]dZnXejddkr ddl m Z e fZ e Z ddlmZddlZddlZddlmZddlmZmZmZmZmZdd lmZmZm Z m!Z!m"Z"m#Z#m$Z$d Zddl%Z%dd l%m&Z&m'Z'm(Z(m)Z)m*Z*m+Z+m,Z,m-Z-m.Z.erdd l%m/Z/nddl0Z0ddl1Z1ddl2Z3dd l4m4Z4ddl5Z5e6Z6ddl7m8Z9ddl7m:Z;da<dZ=nddl>m Z e?fZ e?Z ddl>m@ZddlZddlZddlZddlAmZmZmZm=Z=mZm Z mZmZm$Z$ddlBm'Z'mZm&Z&m!Z!m"Z"m*Z*m+Z+m,Z,m-Z-m.Z.erdd lBm/Z/nddlCm)Z)m(Z(m#Z#ddlDjEZ0ddlBjFZ%ddlGjEZ1ddl3Z3dd lHm4Z4ddlIjJZ5eKZ6ddl7m;Z;e9Z9yddlmLZLmMZMWn<ek rdeNfdYZMddZOdZLnXyddlmPZQWn'ek r"deRfdYZQnXyddlmSZSWn*ek rcejTejUBddZSnXdd lVmWZXeYeXd!reXZWn<dd"lVmZZ[d#e[fd$YZZd%eXfd&YZWydd'l\m]Z]Wnek rd(Z]nXyddl^Z^Wn!ek r,dd)lm^Z^nXy e_Z_Wn*e`k rcdd*lambZbd+Z_nXyejcZcejdZdWnJeek rejfZgegd,krd-Zhnd.Zhd/Zcd0ZdnXydd1limjZjWnTek r1dd2lkmlZlmmZmddlZejnd3Zod4Zpd5ZjnXydd6lqmrZrWn!ek ridd6lsmrZrnXejd7 dTkre4jtZtndd9lqmtZtydd:lamuZuWnkek rdd;lamvZvydd<lwmxZyWnek rd=d>ZynXd?evfd@YZunXyddAlzm{Z{Wnek rQddBZ{nXyddClam|Z|Wnek ryddDl}m~ZWn!ek rddDlm~ZnXy ddElmZmZmZWnek rnXdFefdGYZ|nXyddHlmZmZWnek rejndIejZdJZdKefdLYZddMZdNefdOYZdPefdQYZdReRfdSYZnXdS(Ui(tabsolute_importNi(tStringIO(tFileTypei(tshutil(turlparset urlunparseturljointurlsplitt urlunsplit(t urlretrievetquotetunquotet url2pathnamet pathname2urltContentTooShortErrort splittypecC@s+t|tr!|jd}nt|S(Nsutf-8(t isinstancetunicodetencodet_quote(ts((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR s( tRequestturlopentURLErrort HTTPErrortHTTPBasicAuthHandlertHTTPPasswordMgrt HTTPHandlertHTTPRedirectHandlert build_opener(t HTTPSHandler(t HTMLParser(tifilter(t ifilterfalsecC@sYtdkr*ddl}|jdantj|}|rO|jddSd|fS(sJsplituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.iNs ^(.*)@(.*)$ii(t 
_userprogtNonetretcompiletmatchtgroup(thostR$R&((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt splituser4s  (t TextIOWrapper( RRRR)R R RRR( RR RR R RRRRR(RRR(t filterfalse(tmatch_hostnametCertificateErrorR-cB@seZRS((t__name__t __module__(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR-^sc C@sSg}|stS|jd}|d|d}}|jd}||krhtdt|n|s|j|jkS|dkr|jdnY|jds|jdr|jtj |n"|jtj |j dd x$|D]}|jtj |qWtj d d j |d tj } | j|S( spMatching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 t.iit*s,too many wildcards in certificate DNS name: s[^.]+sxn--s\*s[^.]*s\As\.s\Z(tFalsetsplittcountR-treprtlowertappendt startswithR$tescapetreplaceR%tjoint IGNORECASER&( tdnthostnamet max_wildcardstpatstpartstleftmostt remaindert wildcardstfragtpat((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt_dnsname_matchbs(  " &cC@s[|stdng}|jdd }xC|D];\}}|dkr4t||r_dS|j|q4q4W|sxc|jddD]L}xC|D];\}}|dkrt||rdS|j|qqWqWnt|dkrtd|d jtt|fn;t|dkrKtd ||d fn td dS(s=Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. stempty or no certificate, match_hostname needs a SSL socket or SSL context with either CERT_OPTIONAL or CERT_REQUIREDtsubjectAltNametDNSNtsubjectt commonNameis&hostname %r doesn't match either of %ss, shostname %r doesn't match %ris=no appropriate commonName or subjectAltName fields were found((( t ValueErrortgetRGR7tlenR-R;tmapR5(tcertR>tdnsnamestsantkeytvaluetsub((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR,s.  
%(tSimpleNamespacet ContainercB@seZdZdZRS(sR A generic container for when multiple values need to be returned cK@s|jj|dS(N(t__dict__tupdate(tselftkwargs((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__init__s(R.R/t__doc__R\(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRWs(twhichc @sd}tjjr2||r.SdS|dkrYtjjdtj}n|scdS|jtj}t j dkrtj |kr|j dtj ntjjddjtj}t fd|Drg}qg|D]}|^q}n g}t}xu|D]m}tjj|} | |kr+|j| x9|D].} tjj|| } || |rc| SqcWq+q+WdS( sKGiven a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path. cS@s5tjj|o4tj||o4tjj| S(N(tostpathtexiststaccesstisdir(tfntmode((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt _access_checks$tPATHtwin32itPATHEXTtc3@s*|] }jj|jVqdS(N(R6tendswith(t.0text(tcmd(s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pys sN(R_R`tdirnameR#tenvironRMtdefpathR3tpathseptsystplatformtcurdirtinserttanytsettnormcasetaddR;( RnReR`RftpathexttfilesRmtseentdirtnormdirtthefiletname((Rns>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR^s8  !        
(tZipFilet __enter__(t ZipExtFileRcB@s#eZdZdZdZRS(cC@s|jj|jdS(N(RXRY(RZtbase((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR\scC@s|S(N((RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRscG@s|jdS(N(tclose(RZtexc_info((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__exit__s(R.R/R\RR(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs  RcB@s#eZdZdZdZRS(cC@s|S(N((RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR"scG@s|jdS(N(R(RZR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR%scO@stj|||}t|S(N(t BaseZipFiletopenR(RZtargsR[R((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR)s(R.R/RRR(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR!s  (tpython_implementationcC@s@dtjkrdStjdkr&dStjjdr<dSdS(s6Return a string identifying the Python implementation.tPyPytjavatJythont IronPythontCPython(RstversionR_RR8(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR0s(t sysconfig(tCallablecC@s t|tS(N(RR(tobj((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytcallableDstmbcststricttsurrogateescapecC@sOt|tr|St|tr2|jttStdt|jdS(Nsexpect bytes or str, not %s( Rtbytest text_typeRt _fsencodingt _fserrorst TypeErrorttypeR.(tfilename((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytfsencodeRs cC@sOt|tr|St|tr2|jttStdt|jdS(Nsexpect bytes or str, not %s( RRRtdecodeRRRRR.(R((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytfsdecode[s (tdetect_encoding(tBOM_UTF8tlookupscoding[:=]\s*([-\w.]+)cC@s^|d jjdd}|dks7|jdr;dS|d ksV|jd rZdS|S(s(Imitates get_normal_name in tokenizer.c.i t_t-sutf-8sutf-8-slatin-1s iso-8859-1s iso-latin-1slatin-1-s iso-8859-1-s iso-latin-1-(slatin-1s iso-8859-1s iso-latin-1(slatin-1-s iso-8859-1-s iso-latin-1-(R6R:R8(torig_enctenc((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt_get_normal_namels c@s yjjWntk 
r)dnXtd}d}fd}fd}|}|jtrt|d}d}n|s|gfS||}|r||gfS|}|s||gfS||}|r|||gfS|||gfS(s? The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argument, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. If the encoding cookie is an invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, 'utf-8-sig' is returned. If no encoding is specified, then the default of 'utf-8' will be returned. sutf-8c@s$y SWntk rdSXdS(NRj(t StopIteration((treadline(s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt read_or_stops  c@s7y|jd}WnDtk rYd}dk rJdj|}nt|nXtj|}|ssdSt|d}yt|}WnHt k rdkrd|}ndj|}t|nXr3|j dkr&dkrd}ndj}t|n|d 7}n|S( Nsutf-8s'invalid or missing encoding declarations {} for {!r}isunknown encoding: sunknown encoding for {!r}: {}sencoding problem: utf-8s encoding problem for {!r}: utf-8s-sig( RtUnicodeDecodeErrorR#tformatt SyntaxErrort cookie_retfindallRRt LookupErrorR(tlinet line_stringtmsgtmatchestencodingtcodec(t bom_foundR(s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt find_cookies6          is utf-8-sigN(t__self__RtAttributeErrorR#R2R8RtTrue(RRtdefaultRRtfirsttsecond((RRRs>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRws4  &         (R9ii(tunescape(tChainMap(tMutableMapping(trecursive_reprs...c@sfd}|S(sm Decorator to make a repr function return fillvalue for a recursive call c@smtfd}td|_td|_td|_tdi|_|S(Nc@sWt|tf}|kr%Sj|z|}Wdj|X|S(N(tidt get_identRztdiscard(RZRStresult(t fillvaluet repr_runningt user_function(s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytwrappers  
R/R]R.t__annotations__(RxtgetattrR/R]R.R(RR(R(RRs>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytdecorating_functions  ((RR((Rs>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt_recursive_reprsRcB@seZdZdZdZdZddZdZdZ dZ dZ e d Z ed Zd ZeZd Zed ZdZdZdZdZdZRS(s A ChainMap groups multiple dicts (or other mappings) together to create a single, updateable view. The underlying mappings are stored in a list. That list is public and can accessed or updated using the *maps* attribute. There is no other state. Lookups search the underlying mappings successively until a key is found. In contrast, writes, updates, and deletions only operate on the first mapping. cG@st|pig|_dS(sInitialize a ChainMap by setting *maps* to the given mappings. If no mappings are provided, a single empty dictionary is used. N(tlisttmaps(RZR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR\ scC@st|dS(N(tKeyError(RZRS((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __missing__scC@sAx1|jD]&}y ||SWq tk r/q Xq W|j|S(N(RRR(RZRStmapping((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __getitem__s   cC@s||kr||S|S(N((RZRSR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRMscC@sttj|jS(N(RNRxtunionR(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__len__"scC@sttj|jS(N(titerRxRR(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__iter__%sc@stfd|jDS(Nc3@s|]}|kVqdS(N((Rltm(RS(s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pys )s(RwR(RZRS((RSs>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __contains__(scC@s t|jS(N(RwR(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__bool__+scC@s%dj|djtt|jS(Ns{0.__class__.__name__}({1})s, (RR;ROR5R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__repr__.scG@s|tj||S(s?Create a ChainMap with a single dict created from the 
iterable.(tdicttfromkeys(tclstiterableR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR3scC@s$|j|jdj|jdS(sHNew ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]ii(t __class__Rtcopy(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR8scC@s|ji|jS(s;New ChainMap with a new dict followed by all previous maps.(RR(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt new_child>scC@s|j|jdS(sNew ChainMap from maps[1:].i(RR(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytparentsBscC@s||jd|/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __setitem__GscC@s?y|jd|=Wn&tk r:tdj|nXdS(Nis(Key not found in the first mapping: {!r}(RRR(RZRS((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __delitem__Js cC@s9y|jdjSWntk r4tdnXdS(sPRemove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.is#No keys found in the first mapping.N(RtpopitemR(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRPs cG@sHy|jdj||SWn&tk rCtdj|nXdS(sWRemove *key* from maps[0] and return its value. 
Raise KeyError if *key* not in maps[0].is(Key not found in the first mapping: {!r}N(RtpopRR(RZRSR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRWs cC@s|jdjdS(s'Clear maps[0], leaving maps[1:] intact.iN(Rtclear(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR^sN(R.R/R]R\RRR#RMRRRRRRt classmethodRRt__copy__RtpropertyRRRRRR(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs(               (tcache_from_sourcecC@s2|dkrt}n|r$d}nd}||S(Ntcto(R#t __debug__(R`tdebug_overridetsuffix((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRes    (t OrderedDict(R(tKeysViewt ValuesViewt ItemsViewRcB@seZdZdZejdZejdZdZdZdZ e dZ dZ d Z d Zd Zd Zd ZdZeZeZedZddZddZdZdZeddZdZdZdZ dZ!dZ"RS(s)Dictionary that remembers insertion ordercO@st|dkr+tdt|ny |jWn7tk rog|_}||dg|(i|_nX|j||dS(sInitialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. is$expected at most 1 arguments, got %dN(RNRt_OrderedDict__rootRR#t_OrderedDict__mapt_OrderedDict__update(RZRtkwdstroot((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR\s    cC@s\||krH|j}|d}|||g|d<|d<|j| od[i]=yiiN(RR(RZRSRTt dict_setitemRtlast((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs    )cC@s@||||jj|\}}}||d<||d del od[y]iiN(RR(RZRSt dict_delitemt link_prevt link_next((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs  cc@s=|j}|d}x#||k r8|dV|d}qWdS(sod.__iter__() <==> iter(od)iiN(R(RZRtcurr((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs    cc@s=|j}|d}x#||k r8|dV|d}qWdS(s#od.__reversed__() <==> reversed(od)iiN(R(RZRR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __reversed__s    cC@smyHx|jjD] }|2qW|j}||dg|(|jjWntk r[nXtj|dS(s.od.clear() -> None. 
Remove all items from od.N(Rt itervaluesRR#RRR(RZtnodeR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs  cC@s|stdn|j}|rO|d}|d}||d<||d (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. sdictionary is emptyiii(RRRRR(RZRRtlinkRRRSRT((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs            cC@s t|S(sod.keys() -> list of keys in od(R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytkeysscC@sg|D]}||^qS(s#od.values() -> list of values in od((RZRS((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytvaluesscC@s!g|D]}|||f^qS(s.od.items() -> list of (key, value) pairs in od((RZRS((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytitemsscC@s t|S(s0od.iterkeys() -> an iterator over the keys in od(R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytiterkeysscc@sx|D]}||VqWdS(s2od.itervalues -> an iterator over the values in odN((RZtk((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs cc@s$x|D]}|||fVqWdS(s=od.iteritems -> an iterator over the (key, value) items in odN((RZR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt iteritemss cO@s&t|dkr.tdt|fn|sCtdn|d}d}t|dkrr|d}nt|trxw|D]}|||| None. Update od from dict/iterable E and F. If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v is8update() takes at most 2 positional arguments (%d given)s,update() takes at least 1 argument (0 given)iiR N((RNRRRthasattrR R (RRRZtotherRSRT((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRYs&    cC@sC||kr!||}||=|S||jkr?t|n|S(sod.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. 
(t_OrderedDict__markerR(RZRSRR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR!s  cC@s"||kr||S|||<|S(sDod.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od((RZRSR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt setdefault.s  cC@s|si}nt|tf}||kr4dSd|| repr(od)s...is%s()s%s(%r)N(Rt _get_identRR.R (RZt _repr_runningtcall_key((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR5s   cC@sg|D]}|||g^q}t|j}x'ttD]}|j|dqEW|rx|j|f|fS|j|ffS(s%Return state information for picklingN(tvarsRRRR#R(RZRR t inst_dict((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt __reduce__Cs#cC@s |j|S(s!od.copy() -> a shallow copy of od(R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRMscC@s(|}x|D]}||| New ordered dictionary with keys from S and values equal to v (which defaults to None). ((RRRTtdRS((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRQs  cC@sMt|tr=t|t|ko<|j|jkStj||S(sod.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. 
(RRRNR Rt__eq__(RZR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR\s.cC@s ||k S(N((RZR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt__ne__escC@s t|S(s@od.viewkeys() -> a set-like object providing a view on od's keys(R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytviewkeysjscC@s t|S(s<od.viewvalues() -> an object providing a view on od's values(R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt viewvaluesnscC@s t|S(sBod.viewitems() -> a set-like object providing a view on od's items(R(RZ((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyt viewitemsrsN(#R.R/R]R\RRRRRRRRR R R R RRRYRtobjectRRR#RRRRRRRRRRR(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs:                   (tBaseConfiguratort valid_idents^[a-z_][a-z0-9_]*$cC@s,tj|}|s(td|ntS(Ns!Not a valid Python identifier: %r(t IDENTIFIERR&RLR(RR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR"|stConvertingDictcB@s#eZdZdZddZRS(s A converting dictionary wrapper.cC@sqtj||}|jj|}||k rm|||/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs    cC@sttj|||}|jj|}||k rp|||/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRMs    N(R.R/R]RR#RM(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR$s cC@sjtj|||}|jj|}||k rft|tttfkrf||_||_ qfn|S(N( RRR%R&RR$R'R(R)RS(RZRSRRTR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs   R'cB@s#eZdZdZddZRS(sA converting list wrapper.cC@sqtj||}|jj|}||k rm|||/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs    icC@s^tj||}|jj|}||k rZt|tttfkrZ||_qZn|S(N( RRR%R&RR$R'R(R)(RZtidxRTR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs  (R.R/R]RR(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR's R(cB@seZdZdZRS(sA converting tuple wrapper.cC@sgtj||}|jj|}||k rct|tttfkrc||_||_ qcn|S(N( 
ttupleRR%R&RR$R'R(R)RS(RZRSRTR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyRs   (R.R/R]R(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR(sR!cB@seZdZejdZejdZejdZejdZejdZ idd6dd 6Z e e Z d Zd Zd Zd ZdZdZdZRS(sQ The configurator base class which defines some useful defaults. s%^(?P[a-z]+)://(?P.*)$s ^\s*(\w+)\s*s^\.\s*(\w+)\s*s^\[\s*(\w+)\s*\]\s*s^\d+$t ext_convertRmt cfg_converttcfgcC@st||_||j_dS(N(R$tconfigR%(RZR/((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR\sc C@s|jd}|jd}yy|j|}x_|D]W}|d|7}yt||}Wq7tk r|j|t||}q7Xq7W|SWnVtk rtjd\}}td||f}|||_ |_ |nXdS(sl Resolve strings to objects using standard import and attribute syntax. R0iisCannot resolve %r: %sN( R3RtimporterRRt ImportErrorRsRRLt __cause__t __traceback__( RZRRtusedtfoundREtettbtv((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytresolves"    cC@s |j|S(s*Default converter for the ext:// protocol.(R9(RZRT((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR,scC@sO|}|jj|}|dkr7td|n||j}|j|jd}x|rJ|jj|}|r||jd}n|jj|}|r|jd}|j j|s||}qyt |}||}Wqt k r||}qXn|r1||j}qatd||fqaW|S(s*Default converter for the cfg:// protocol.sUnable to convert %risUnable to convert %r at %rN( t WORD_PATTERNR&R#RLtendR/tgroupst DOT_PATTERNt INDEX_PATTERNt DIGIT_PATTERNtintR(RZRTtrestRRR*tn((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR-s2     cC@s/t|t r7t|tr7t|}||_nt|t rnt|trnt|}||_nt|t rt|trt|}||_nt|tr+|j j |}|r+|j }|d}|j j |d}|r(|d}t||}||}q(q+n|S(s Convert values to an appropriate type. dicts, lists and tuples are replaced by their converting alternatives. Strings are checked to see if they have a conversion format and are converted if they do. 
tprefixRN(RR$RR%R'RR(R+t string_typestCONVERT_PATTERNR&t groupdicttvalue_convertersRMR#R(RZRTRRRCt converterR((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR&)s*         c C@s|jd}t|s-|j|}n|jdd}tg|D]"}t|rI|||f^qI}||}|rx-|jD]\}}t|||qWn|S(s1Configure an object with a user-supplied factory.s()R0N(RRR9R#RR"R tsetattr( RZR/RtpropsRR[RRRT((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytconfigure_customEs 5 cC@s"t|trt|}n|S(s0Utility function which converts lists to tuples.(RRR+(RZRT((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pytas_tupleSs(R.R/R]R$R%RER:R=R>R?RGt staticmethodt __import__R0R\R9R,R-R&RKRL(((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyR!s"      "  (ii(t __future__RR_R$RstsslR1R#t version_infoRt basestringRDRRttypesRt file_typet __builtin__tbuiltinst ConfigParsert configparsert _backportRRRRRRturllibR R RR R R RRturllib2RRRRRRRRRRthttplibt xmlrpclibtQueuetqueueRthtmlentitydefst raw_inputt itertoolsR tfilterR!R+R"R)tiotstrR*t urllib.parseturllib.requestt urllib.errort http.clienttclienttrequestt xmlrpc.clientt html.parsert html.entitiestentitiestinputR,R-RLRGRVRWR R^tF_OKtX_OKtzipfileRRRRtBaseZipExtFileRtRRRt NameErrort collectionsRRRRtgetfilesystemencodingRRttokenizeRtcodecsRRR%RRthtmlR9tcgiRRRtreprlibRRtimpRRtthreadRRt dummy_threadt_abcollRRRRtlogging.configR!R"tIR#R$RRR'R+R((((s>/usr/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyts$        (4  @         @F   2 +  A                   [   b          PK.e[x;x;distlib/scripts.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2013-2015 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. 
# from io import BytesIO import logging import os import re import struct import sys from .compat import sysconfig, detect_encoding, ZipFile from .resources import finder from .util import (FileOperator, get_export_entry, convert_path, get_executable, in_venv) logger = logging.getLogger(__name__) _DEFAULT_MANIFEST = ''' '''.strip() # check if Python is called on the first line with this expression FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$') SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*- if __name__ == '__main__': import sys, re def _resolve(module, func): __import__(module) mod = sys.modules[module] parts = func.split('.') result = getattr(mod, parts.pop(0)) for p in parts: result = getattr(result, p) return result try: sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) func = _resolve('%(module)s', '%(func)s') rc = func() # None interpreted as 0 except Exception as e: # only supporting Python >= 2.6 sys.stderr.write('%%s\\n' %% e) rc = 1 sys.exit(rc) ''' def _enquote_executable(executable): if ' ' in executable: # make sure we quote only the executable in case of env # for example /usr/bin/env "/dir with spaces/bin/jython" # instead of "/usr/bin/env /dir with spaces/bin/jython" # otherwise whole if executable.startswith('/usr/bin/env '): env, _executable = executable.split(' ', 1) if ' ' in _executable and not _executable.startswith('"'): executable = '%s "%s"' % (env, _executable) else: if not executable.startswith('"'): executable = '"%s"' % executable return executable class ScriptMaker(object): """ A class to copy or create scripts from source scripts or callable specifications. """ script_template = SCRIPT_TEMPLATE executable = None # for shebangs def __init__(self, source_dir, target_dir, add_launchers=True, dry_run=False, fileop=None): self.source_dir = source_dir self.target_dir = target_dir self.add_launchers = add_launchers self.force = False self.clobber = False # It only makes sense to set mode bits on POSIX. 
self.set_mode = (os.name == 'posix') or (os.name == 'java' and os._name == 'posix') self.variants = set(('', 'X.Y')) self._fileop = fileop or FileOperator(dry_run) self._is_nt = os.name == 'nt' or ( os.name == 'java' and os._name == 'nt') def _get_alternate_executable(self, executable, options): if options.get('gui', False) and self._is_nt: # pragma: no cover dn, fn = os.path.split(executable) fn = fn.replace('python', 'pythonw') executable = os.path.join(dn, fn) return executable if sys.platform.startswith('java'): # pragma: no cover def _is_shell(self, executable): """ Determine if the specified executable is a script (contains a #! line) """ try: with open(executable) as fp: return fp.read(2) == '#!' except (OSError, IOError): logger.warning('Failed to open %s', executable) return False def _fix_jython_executable(self, executable): if self._is_shell(executable): # Workaround for Jython is not needed on Linux systems. import java if java.lang.System.getProperty('os.name') == 'Linux': return executable elif executable.lower().endswith('jython.exe'): # Use wrapper exe for Jython on Windows return executable return '/usr/bin/env %s' % executable def _get_shebang(self, encoding, post_interp=b'', options=None): enquote = True if self.executable: executable = self.executable enquote = False # assume this will be taken care of elif not sysconfig.is_python_build(): executable = get_executable() elif in_venv(): # pragma: no cover executable = os.path.join(sysconfig.get_path('scripts'), 'python%s' % sysconfig.get_config_var('EXE')) else: # pragma: no cover executable = os.path.join( sysconfig.get_config_var('BINDIR'), 'python%s%s' % (sysconfig.get_config_var('VERSION'), sysconfig.get_config_var('EXE'))) if options: executable = self._get_alternate_executable(executable, options) if sys.platform.startswith('java'): # pragma: no cover executable = self._fix_jython_executable(executable) # Normalise case for Windows executable = os.path.normcase(executable) # If the user 
didn't specify an executable, it may be necessary to # cater for executable paths with spaces (not uncommon on Windows) if enquote: executable = _enquote_executable(executable) # Issue #51: don't use fsencode, since we later try to # check that the shebang is decodable using utf-8. executable = executable.encode('utf-8') # in case of IronPython, play safe and enable frames support if (sys.platform == 'cli' and '-X:Frames' not in post_interp and '-X:FullFrames' not in post_interp): # pragma: no cover post_interp += b' -X:Frames' shebang = b'#!' + executable + post_interp + b'\n' # Python parser starts to read a script using UTF-8 until # it gets a #coding:xxx cookie. The shebang has to be the # first line of a file, the #coding:xxx cookie cannot be # written before. So the shebang has to be decodable from # UTF-8. try: shebang.decode('utf-8') except UnicodeDecodeError: # pragma: no cover raise ValueError( 'The shebang (%r) is not decodable from utf-8' % shebang) # If the script is encoded to a custom encoding (use a # #coding:xxx cookie), the shebang has to be decodable from # the script encoding too. 
if encoding != 'utf-8': try: shebang.decode(encoding) except UnicodeDecodeError: # pragma: no cover raise ValueError( 'The shebang (%r) is not decodable ' 'from the script encoding (%r)' % (shebang, encoding)) return shebang def _get_script_text(self, entry): return self.script_template % dict(module=entry.prefix, func=entry.suffix) manifest = _DEFAULT_MANIFEST def get_manifest(self, exename): base = os.path.basename(exename) return self.manifest % base def _write_script(self, names, shebang, script_bytes, filenames, ext): use_launcher = self.add_launchers and self._is_nt linesep = os.linesep.encode('utf-8') if not use_launcher: script_bytes = shebang + linesep + script_bytes else: # pragma: no cover if ext == 'py': launcher = self._get_launcher('t') else: launcher = self._get_launcher('w') stream = BytesIO() with ZipFile(stream, 'w') as zf: zf.writestr('__main__.py', script_bytes) zip_data = stream.getvalue() script_bytes = launcher + shebang + linesep + zip_data for name in names: outname = os.path.join(self.target_dir, name) if use_launcher: # pragma: no cover n, e = os.path.splitext(outname) if e.startswith('.py'): outname = n outname = '%s.exe' % outname try: self._fileop.write_binary_file(outname, script_bytes) except Exception: # Failed writing an executable - it might be in use. logger.warning('Failed to write executable - trying to ' 'use .deleteme logic') dfname = '%s.deleteme' % outname if os.path.exists(dfname): os.remove(dfname) # Not allowed to fail here os.rename(outname, dfname) # nor here self._fileop.write_binary_file(outname, script_bytes) logger.debug('Able to replace executable using ' '.deleteme logic') try: os.remove(dfname) except Exception: pass # still in use - ignore error else: if self._is_nt and not outname.endswith('.' 
+ ext): # pragma: no cover outname = '%s.%s' % (outname, ext) if os.path.exists(outname) and not self.clobber: logger.warning('Skipping existing file %s', outname) continue self._fileop.write_binary_file(outname, script_bytes) if self.set_mode: self._fileop.set_executable_mode([outname]) filenames.append(outname) def _make_script(self, entry, filenames, options=None): post_interp = b'' if options: args = options.get('interpreter_args', []) if args: args = ' %s' % ' '.join(args) post_interp = args.encode('utf-8') shebang = self._get_shebang('utf-8', post_interp, options=options) script = self._get_script_text(entry).encode('utf-8') name = entry.name scriptnames = set() if '' in self.variants: scriptnames.add(name) if 'X' in self.variants: scriptnames.add('%s%s' % (name, sys.version[0])) if 'X.Y' in self.variants: scriptnames.add('%s-%s' % (name, sys.version[:3])) if options and options.get('gui', False): ext = 'pyw' else: ext = 'py' self._write_script(scriptnames, shebang, script, filenames, ext) def _copy_script(self, script, filenames): adjust = False script = os.path.join(self.source_dir, convert_path(script)) outname = os.path.join(self.target_dir, os.path.basename(script)) if not self.force and not self._fileop.newer(script, outname): logger.debug('not copying %s (up-to-date)', script) return # Always open the file, but ignore failures in dry-run mode -- # that way, we'll get accurate feedback if we can read the # script. 
try: f = open(script, 'rb') except IOError: # pragma: no cover if not self.dry_run: raise f = None else: first_line = f.readline() if not first_line: # pragma: no cover logger.warning('%s: %s is an empty file (skipping)', self.get_command_name(), script) return match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n')) if match: adjust = True post_interp = match.group(1) or b'' if not adjust: if f: f.close() self._fileop.copy_file(script, outname) if self.set_mode: self._fileop.set_executable_mode([outname]) filenames.append(outname) else: logger.info('copying and adjusting %s -> %s', script, self.target_dir) if not self._fileop.dry_run: encoding, lines = detect_encoding(f.readline) f.seek(0) shebang = self._get_shebang(encoding, post_interp) if b'pythonw' in first_line: # pragma: no cover ext = 'pyw' else: ext = 'py' n = os.path.basename(outname) self._write_script([n], shebang, f.read(), filenames, ext) if f: f.close() @property def dry_run(self): return self._fileop.dry_run @dry_run.setter def dry_run(self, value): self._fileop.dry_run = value if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover # Executable launcher support. # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/ def _get_launcher(self, kind): if struct.calcsize('P') == 8: # 64-bit bits = '64' else: bits = '32' name = '%s%s.exe' % (kind, bits) # Issue 31: don't hardcode an absolute package name, but # determine it relative to the current package distlib_package = __name__.rsplit('.', 1)[0] result = finder(distlib_package).find(name).bytes return result # Public API follows def make(self, specification, options=None): """ Make a script. :param specification: The specification, which is either a valid export entry specification (to make a script from a callable) or a filename (to make a script by copying from a source location). :param options: A dictionary of options controlling script generation. 
:return: A list of all absolute pathnames written to. """ filenames = [] entry = get_export_entry(specification) if entry is None: self._copy_script(specification, filenames) else: self._make_script(entry, filenames, options=options) return filenames def make_multiple(self, specifications, options=None): """ Take a list of specifications and make scripts from them, :param specifications: A list of specifications. :return: A list of all absolute pathnames written to, """ filenames = [] for specification in specifications: filenames.extend(self.make(specification, options)) return filenames PK.e[=distlib/markers.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2012-2013 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # """Parser for the environment markers micro-language defined in PEP 345.""" import ast import os import sys import platform from .compat import python_implementation, string_types from .util import in_venv __all__ = ['interpret'] class Evaluator(object): """ A limited evaluator for Python expressions. """ operators = { 'eq': lambda x, y: x == y, 'gt': lambda x, y: x > y, 'gte': lambda x, y: x >= y, 'in': lambda x, y: x in y, 'lt': lambda x, y: x < y, 'lte': lambda x, y: x <= y, 'not': lambda x: not x, 'noteq': lambda x, y: x != y, 'notin': lambda x, y: x not in y, } allowed_values = { 'sys_platform': sys.platform, 'python_version': '%s.%s' % sys.version_info[:2], # parsing sys.platform is not reliable, but there is no other # way to get e.g. 2.7.2+, and the PEP is defined with sys.version 'python_full_version': sys.version.split(' ', 1)[0], 'os_name': os.name, 'platform_in_venv': str(in_venv()), 'platform_release': platform.release(), 'platform_version': platform.version(), 'platform_machine': platform.machine(), 'platform_python_implementation': python_implementation(), } def __init__(self, context=None): """ Initialise an instance. 
:param context: If specified, names are looked up in this mapping. """ self.context = context or {} self.source = None def get_fragment(self, offset): """ Get the part of the source which is causing a problem. """ fragment_len = 10 s = '%r' % (self.source[offset:offset + fragment_len]) if offset + fragment_len < len(self.source): s += '...' return s def get_handler(self, node_type): """ Get a handler for the specified AST node type. """ return getattr(self, 'do_%s' % node_type, None) def evaluate(self, node, filename=None): """ Evaluate a source string or node, using ``filename`` when displaying errors. """ if isinstance(node, string_types): self.source = node kwargs = {'mode': 'eval'} if filename: kwargs['filename'] = filename try: node = ast.parse(node, **kwargs) except SyntaxError as e: s = self.get_fragment(e.offset) raise SyntaxError('syntax error %s' % s) node_type = node.__class__.__name__.lower() handler = self.get_handler(node_type) if handler is None: if self.source is None: s = '(source not available)' else: s = self.get_fragment(node.col_offset) raise SyntaxError("don't know how to evaluate %r %s" % ( node_type, s)) return handler(node) def get_attr_key(self, node): assert isinstance(node, ast.Attribute), 'attribute node expected' return '%s.%s' % (node.value.id, node.attr) def do_attribute(self, node): if not isinstance(node.value, ast.Name): valid = False else: key = self.get_attr_key(node) valid = key in self.context or key in self.allowed_values if not valid: raise SyntaxError('invalid expression: %s' % key) if key in self.context: result = self.context[key] else: result = self.allowed_values[key] return result def do_boolop(self, node): result = self.evaluate(node.values[0]) is_or = node.op.__class__ is ast.Or is_and = node.op.__class__ is ast.And assert is_or or is_and if (is_and and result) or (is_or and not result): for n in node.values[1:]: result = self.evaluate(n) if (is_or and result) or (is_and and not result): break return result def 
do_compare(self, node): def sanity_check(lhsnode, rhsnode): valid = True if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str): valid = False #elif (isinstance(lhsnode, ast.Attribute) # and isinstance(rhsnode, ast.Attribute)): # klhs = self.get_attr_key(lhsnode) # krhs = self.get_attr_key(rhsnode) # valid = klhs != krhs if not valid: s = self.get_fragment(node.col_offset) raise SyntaxError('Invalid comparison: %s' % s) lhsnode = node.left lhs = self.evaluate(lhsnode) result = True for op, rhsnode in zip(node.ops, node.comparators): sanity_check(lhsnode, rhsnode) op = op.__class__.__name__.lower() if op not in self.operators: raise SyntaxError('unsupported operation: %r' % op) rhs = self.evaluate(rhsnode) result = self.operators[op](lhs, rhs) if not result: break lhs = rhs lhsnode = rhsnode return result def do_expression(self, node): return self.evaluate(node.body) def do_name(self, node): valid = False if node.id in self.context: valid = True result = self.context[node.id] elif node.id in self.allowed_values: valid = True result = self.allowed_values[node.id] if not valid: raise SyntaxError('invalid expression: %s' % node.id) return result def do_str(self, node): return node.s def interpret(marker, execution_context=None): """ Interpret a marker and return a result depending on environment. :param marker: The marker to interpret. :type marker: str :param execution_context: The context used for name lookup. 
:type execution_context: mapping """ return Evaluator(execution_context).evaluate(marker.strip()) PK.e["\N\Ndistlib/index.pyonu[ abc@sddlZddlZddlZddlZddlZddlZyddlmZWn!ek rddl mZnXddl m Z ddl m Z mZmZmZmZmZddlmZmZmZejeZdZdZd efd YZdS( iN(tThreadi(tDistlibException(tHTTPBasicAuthHandlertRequesttHTTPPasswordMgrturlparset build_openert string_types(tcached_propertytzip_dirt ServerProxyshttps://pypi.python.org/pypitpypit PackageIndexcBseZdZdZddZdZdZdZdZ dZ dZ dd Z dd Z dd Zddd d ddZdZddZddZdddZdZdZddZRS(sc This class represents a package index compatible with PyPI, the Python Package Index. s.----------ThIs_Is_tHe_distlib_index_bouNdaRY_$c Cs|p t|_|jt|j\}}}}}}|sX|sX|sX|d krntd|jnd |_d |_d |_d |_ d |_ t t j dj}x`d D]X} y>tj| dgd|d |} | d kr| |_PnWqtk rqXqWWd QXd S(s Initialise an instance. :param url: The URL of the index. If not specified, the URL for PyPI is used. thttpthttpssinvalid repository: %stwtgpgtgpg2s --versiontstdouttstderriN(R R(RR(t DEFAULT_INDEXturltread_configurationRRtNonetpassword_handlert ssl_verifierRtgpg_homet rpc_proxytopentostdevnullt subprocesst check_calltOSError( tselfRtschemetnetloctpathtparamstquerytfragtsinktstrc((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyt__init__$s( !          cCs3ddlm}ddlm}|}||S(ss Get the distutils command for interacting with PyPI configurations. :return: the command. i(t Distribution(t PyPIRCCommand(tdistutils.coreR-tdistutils.configR.(R"R-R.td((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyt_get_pypirc_commandBs cCsy|j}|j|_|j}|jd|_|jd|_|jdd|_|jd|j|_dS(s Read the PyPI access configuration as supported by distutils, getting PyPI to do the actual work. This populates ``username``, ``password``, ``realm`` and ``url`` attributes from the configuration. tusernametpasswordtrealmR t repositoryN(R2RR6t _read_pypirctgetR3R4R5(R"tctcfg((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyRLs   cCs0|j|j}|j|j|jdS(s Save the PyPI access configuration. 
You must have set ``username`` and ``password`` attributes before calling this method. Again, distutils is used to do the actual work. N(tcheck_credentialsR2t _store_pypircR3R4(R"R9((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pytsave_configuration[s  cCs|jdks|jdkr-tdnt}t|j\}}}}}}|j|j||j|jt ||_ dS(sp Check that ``username`` and ``password`` have been set, and raise an exception if not. s!username and password must be setN( R3RR4RRRRt add_passwordR5RR(R"tpmt_R$((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyR;gs  !cCs|j|j|j}d|d<|j|jg}|j|}d|d<|j|jg}|j|S(sq Register a distribution on PyPI, using the provided metadata. :param metadata: A :class:`Metadata` instance defining at least a name and version number for the distribution to be registered. :return: The HTTP response received from PyPI upon submission of the request. tverifys:actiontsubmit(R;tvalidatettodicttencode_requesttitemst send_request(R"tmetadataR1trequesttresponse((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pytregisterss     cCsjxYtr[|j}|sPn|jdj}|j|tjd||fqW|jdS(sr Thread runner for reading lines of from a subprocess into a buffer. :param name: The logical name of the stream (used for logging only). :param stream: The stream to read from. This will typically a pipe connected to the output stream of a subprocess. :param outbuf: The list to append the read lines to. sutf-8s%s: %sN(tTruetreadlinetdecodetrstriptappendtloggertdebugtclose(R"tnametstreamtoutbufR*((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyt_readers   cCs|jdddg}|dkr-|j}n|rI|jd|gn|dk rn|jdddgntj}tjj|tjj |d}|jd d d |d ||gt j d dj|||fS(s Return a suitable command for signing a file. :param filename: The pathname to the file to be signed. :param signer: The identifier of the signer of the file. :param sign_password: The passphrase for the signer's private key used for signing. :param keystore: The path to a directory which contains the keys used in verification. 
If not specified, the instance's ``gpg_home`` attribute is used instead. :return: The signing command as a list suitable to be passed to :class:`subprocess.Popen`. s --status-fdt2s--no-ttys --homedirs--batchs--passphrase-fdt0s.ascs --detach-signs--armors --local-users--outputs invoking: %st N( RRRtextendttempfiletmkdtempRR%tjointbasenameRQRR(R"tfilenametsignert sign_passwordtkeystoretcmdttdtsf((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pytget_sign_commands    %c Cs itjd6tjd6}|dk r6tj|d        c Cs|jtjj|s/td|ntjj|d}tjj|sitd|n|j|j|j }}t |j }d d|fd|fg}d||fg}|j ||} |j | S( s2 Upload documentation to the index. :param metadata: A :class:`Metadata` instance defining at least a name and version number for the documentation to be uploaded. :param doc_dir: The pathname of the directory which contains the documentation. This should be the directory that contains the ``index.html`` for the documentation. :return: The HTTP response received from PyPI upon submission of the request. snot a directory: %rs index.htmls not found: %rs:actiont doc_uploadRTtversionR(s:actionR(R;RR%tisdirRR^RRCRTRR tgetvalueRERG( R"RHtdoc_dirtfnRTRtzip_datatfieldsRRI((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pytupload_documentation)s  cCs||jdddg}|dkr-|j}n|rI|jd|gn|jd||gtjddj||S( s| Return a suitable command for verifying a file. :param signature_filename: The pathname to the file containing the signature. :param data_filename: The pathname to the file containing the signed data. :param keystore: The path to a directory which contains the keys used in verification. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: The verifying command as a list suitable to be passed to :class:`subprocess.Popen`. 
s --status-fdRXs--no-ttys --homedirs--verifys invoking: %sRZN(RRRR[RQRRR^(R"tsignature_filenamet data_filenameRcRd((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pytget_verify_commandEs  cCsn|jstdn|j|||}|j|\}}}|dkrdtd|n|dkS(s6 Verify a signature for a file. :param signature_filename: The pathname to the file containing the signature. :param data_filename: The pathname to the file containing the signed data. :param keystore: The path to a directory which contains the keys used in verification. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: True if the signature was verified, else False. s0verification unavailable because gpg unavailableiis(verify command failed with error code %s(ii(RRRRv(R"RRRcRdR+RR((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pytverify_signature]s     cCs |d kr"d }tjdnMt|ttfrF|\}}nd}tt|}tjd|t|d}|j t |}z|j } d} d} d} d} d| krt | d } n|r|| | | nxyt rp|j| }|sPn| t|7} |j||rJ|j|n| d 7} |r|| | | qqWWd |jXWd QX| dkr| | krtd | | fn|r|j}||krtd ||||fntjd|nd S(s This is a convenience method for downloading a file from an URL. Normally, this will be a file from the index, though currently no check is made for this (i.e. a file can be downloaded from anywhere). The method is just like the :func:`urlretrieve` function in the standard library, except that it allows digest computation to be done during download and checking that the downloaded data matched any expected value. :param url: The URL of the file to be downloaded (assumed to be available via an HTTP GET request). :param destfile: The pathname where the downloaded file is to be saved. :param digest: If specified, this must be a (hasher, value) tuple, where hasher is the algorithm used (e.g. ``'md5'``) and ``value`` is the expected value. :param reporthook: The same as for :func:`urlretrieve` in the standard library. 
sNo digest specifiedRsDigest specified: %stwbi iiscontent-lengthsContent-LengthiNs1retrieval incomplete: got only %d out of %d bytess.%s digest mismatch for %s: expected %s, got %ssDigest verified: %s(RRQRRt isinstancetlistttupletgetattrRRRGRtinfotintRLRtlenRnRRSRR(R"Rtdestfiletdigestt reporthooktdigesterthashertdfptsfptheaderst blocksizetsizeRtblocknumtblocktactual((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyt download_filevsV        cCsWg}|jr"|j|jn|jr>|j|jnt|}|j|S(s Send a standard library :class:`Request` to PyPI and return its response. :param req: The request to send. :return: The HTTP response from PyPI (a standard library HTTPResponse). (RRPRRR(R"treqthandlerstopener((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyRGs   cCs<g}|j}xy|D]q\}}t|ttfsC|g}nxA|D]9}|jd|d|jdd|jdfqJWqWxG|D]?\}} } |jd|d|| fjdd| fqW|jd|ddfdj|} d|} i| d6tt| d 6} t |j | | S( s& Encode fields and files for posting to an HTTP server. :param fields: The fields to send as a list of (fieldname, value) tuples. :param files: The files to send as a list of (fieldname, filename, file_bytes) tuple. 
s--s)Content-Disposition: form-data; name="%s"sutf-8ts8Content-Disposition: form-data; name="%s"; filename="%s"s smultipart/form-data; boundary=s Content-typesContent-length( tboundaryRRRR[RwR^tstrRRR(R"RRtpartsRtktvaluestvtkeyR`tvaluetbodytctR((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyREs4      cCsbt|tri|d6}n|jdkrIt|jdd|_n|jj||p^dS(NRTttimeoutg@tand(RRRRR Rtsearch(R"ttermstoperator((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyRs N(t__name__t __module__t__doc__RRR,R2RR=R;RKRWRgRvRyRRRRRRGRER(((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyR s*      # 8   M  +(RtloggingRRRR\t threadingRt ImportErrortdummy_threadingRRtcompatRRRRRRtutilRR R t getLoggerRRQRt DEFAULT_REALMtobjectR (((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyts       .PK.e[_Ԡ˘˘distlib/wheel.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2013-2016 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # from __future__ import unicode_literals import base64 import codecs import datetime import distutils.util from email import message_from_file import hashlib import imp import json import logging import os import posixpath import re import shutil import sys import tempfile import zipfile from . 
import __version__, DistlibException from .compat import sysconfig, ZipFile, fsdecode, text_type, filter from .database import InstalledDistribution from .metadata import Metadata, METADATA_FILENAME from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache, cached_property, get_cache_base, read_exports, tempdir) from .version import NormalizedVersion, UnsupportedVersionError logger = logging.getLogger(__name__) cache = None # created when needed if hasattr(sys, 'pypy_version_info'): IMP_PREFIX = 'pp' elif sys.platform.startswith('java'): IMP_PREFIX = 'jy' elif sys.platform == 'cli': IMP_PREFIX = 'ip' else: IMP_PREFIX = 'cp' VER_SUFFIX = sysconfig.get_config_var('py_version_nodot') if not VER_SUFFIX: # pragma: no cover VER_SUFFIX = '%s%s' % sys.version_info[:2] PYVER = 'py' + VER_SUFFIX IMPVER = IMP_PREFIX + VER_SUFFIX ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_') ABI = sysconfig.get_config_var('SOABI') if ABI and ABI.startswith('cpython-'): ABI = ABI.replace('cpython-', 'cp') else: def _derive_abi(): parts = ['cp', VER_SUFFIX] if sysconfig.get_config_var('Py_DEBUG'): parts.append('d') if sysconfig.get_config_var('WITH_PYMALLOC'): parts.append('m') if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4: parts.append('u') return ''.join(parts) ABI = _derive_abi() del _derive_abi FILENAME_RE = re.compile(r''' (?P[^-]+) -(?P\d+[^-]*) (-(?P\d+[^-]*))? 
-(?P\w+\d+(\.\w+\d+)*) -(?P\w+) -(?P\w+(\.\w+)*) \.whl$ ''', re.IGNORECASE | re.VERBOSE) NAME_VERSION_RE = re.compile(r''' (?P[^-]+) -(?P\d+[^-]*) (-(?P\d+[^-]*))?$ ''', re.IGNORECASE | re.VERBOSE) SHEBANG_RE = re.compile(br'\s*#![^\r\n]*') SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$') SHEBANG_PYTHON = b'#!python' SHEBANG_PYTHONW = b'#!pythonw' if os.sep == '/': to_posix = lambda o: o else: to_posix = lambda o: o.replace(os.sep, '/') class Mounter(object): def __init__(self): self.impure_wheels = {} self.libs = {} def add(self, pathname, extensions): self.impure_wheels[pathname] = extensions self.libs.update(extensions) def remove(self, pathname): extensions = self.impure_wheels.pop(pathname) for k, v in extensions: if k in self.libs: del self.libs[k] def find_module(self, fullname, path=None): if fullname in self.libs: result = self else: result = None return result def load_module(self, fullname): if fullname in sys.modules: result = sys.modules[fullname] else: if fullname not in self.libs: raise ImportError('unable to find extension for %s' % fullname) result = imp.load_dynamic(fullname, self.libs[fullname]) result.__loader__ = self parts = fullname.rsplit('.', 1) if len(parts) > 1: result.__package__ = parts[0] return result _hook = Mounter() class Wheel(object): """ Class to build and install from Wheel files (PEP 427). """ wheel_version = (1, 1) hash_kind = 'sha256' def __init__(self, filename=None, sign=False, verify=False): """ Initialise an instance using a (valid) filename. 
""" self.sign = sign self.should_verify = verify self.buildver = '' self.pyver = [PYVER] self.abi = ['none'] self.arch = ['any'] self.dirname = os.getcwd() if filename is None: self.name = 'dummy' self.version = '0.1' self._filename = self.filename else: m = NAME_VERSION_RE.match(filename) if m: info = m.groupdict('') self.name = info['nm'] # Reinstate the local version separator self.version = info['vn'].replace('_', '-') self.buildver = info['bn'] self._filename = self.filename else: dirname, filename = os.path.split(filename) m = FILENAME_RE.match(filename) if not m: raise DistlibException('Invalid name or ' 'filename: %r' % filename) if dirname: self.dirname = os.path.abspath(dirname) self._filename = filename info = m.groupdict('') self.name = info['nm'] self.version = info['vn'] self.buildver = info['bn'] self.pyver = info['py'].split('.') self.abi = info['bi'].split('.') self.arch = info['ar'].split('.') @property def filename(self): """ Build and return a filename from the various components. 
""" if self.buildver: buildver = '-' + self.buildver else: buildver = '' pyver = '.'.join(self.pyver) abi = '.'.join(self.abi) arch = '.'.join(self.arch) # replace - with _ as a local version separator version = self.version.replace('-', '_') return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver, pyver, abi, arch) @property def exists(self): path = os.path.join(self.dirname, self.filename) return os.path.isfile(path) @property def tags(self): for pyver in self.pyver: for abi in self.abi: for arch in self.arch: yield pyver, abi, arch @cached_property def metadata(self): pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) info_dir = '%s.dist-info' % name_ver wrapper = codecs.getreader('utf-8') with ZipFile(pathname, 'r') as zf: wheel_metadata = self.get_wheel_metadata(zf) wv = wheel_metadata['Wheel-Version'].split('.', 1) file_version = tuple([int(i) for i in wv]) if file_version < (1, 1): fn = 'METADATA' else: fn = METADATA_FILENAME try: metadata_filename = posixpath.join(info_dir, fn) with zf.open(metadata_filename) as bf: wf = wrapper(bf) result = Metadata(fileobj=wf) except KeyError: raise ValueError('Invalid wheel, because %s is ' 'missing' % fn) return result def get_wheel_metadata(self, zf): name_ver = '%s-%s' % (self.name, self.version) info_dir = '%s.dist-info' % name_ver metadata_filename = posixpath.join(info_dir, 'WHEEL') with zf.open(metadata_filename) as bf: wf = codecs.getreader('utf-8')(bf) message = message_from_file(wf) return dict(message) @cached_property def info(self): pathname = os.path.join(self.dirname, self.filename) with ZipFile(pathname, 'r') as zf: result = self.get_wheel_metadata(zf) return result def process_shebang(self, data): m = SHEBANG_RE.match(data) if m: end = m.end() shebang, data_after_shebang = data[:end], data[end:] # Preserve any arguments after the interpreter if b'pythonw' in shebang.lower(): shebang_python = SHEBANG_PYTHONW else: shebang_python = SHEBANG_PYTHON m = 
SHEBANG_DETAIL_RE.match(shebang) if m: args = b' ' + m.groups()[-1] else: args = b'' shebang = shebang_python + args data = shebang + data_after_shebang else: cr = data.find(b'\r') lf = data.find(b'\n') if cr < 0 or cr > lf: term = b'\n' else: if data[cr:cr + 2] == b'\r\n': term = b'\r\n' else: term = b'\r' data = SHEBANG_PYTHON + term + data return data def get_hash(self, data, hash_kind=None): if hash_kind is None: hash_kind = self.hash_kind try: hasher = getattr(hashlib, hash_kind) except AttributeError: raise DistlibException('Unsupported hash algorithm: %r' % hash_kind) result = hasher(data).digest() result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii') return hash_kind, result def write_record(self, records, record_path, base): records = list(records) # make a copy for sorting p = to_posix(os.path.relpath(record_path, base)) records.append((p, '', '')) records.sort() with CSVWriter(record_path) as writer: for row in records: writer.writerow(row) def write_records(self, info, libdir, archive_paths): records = [] distinfo, info_dir = info hasher = getattr(hashlib, self.hash_kind) for ap, p in archive_paths: with open(p, 'rb') as f: data = f.read() digest = '%s=%s' % self.get_hash(data) size = os.path.getsize(p) records.append((ap, digest, size)) p = os.path.join(distinfo, 'RECORD') self.write_record(records, p, libdir) ap = to_posix(os.path.join(info_dir, 'RECORD')) archive_paths.append((ap, p)) def build_zip(self, pathname, archive_paths): with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf: for ap, p in archive_paths: logger.debug('Wrote %s to %s in wheel', p, ap) zf.write(p, ap) def build(self, paths, tags=None, wheel_version=None): """ Build a wheel from files in specified paths, and use any specified tags when determining the name of the wheel. 
""" if tags is None: tags = {} libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0] if libkey == 'platlib': is_pure = 'false' default_pyver = [IMPVER] default_abi = [ABI] default_arch = [ARCH] else: is_pure = 'true' default_pyver = [PYVER] default_abi = ['none'] default_arch = ['any'] self.pyver = tags.get('pyver', default_pyver) self.abi = tags.get('abi', default_abi) self.arch = tags.get('arch', default_arch) libdir = paths[libkey] name_ver = '%s-%s' % (self.name, self.version) data_dir = '%s.data' % name_ver info_dir = '%s.dist-info' % name_ver archive_paths = [] # First, stuff which is not in site-packages for key in ('data', 'headers', 'scripts'): if key not in paths: continue path = paths[key] if os.path.isdir(path): for root, dirs, files in os.walk(path): for fn in files: p = fsdecode(os.path.join(root, fn)) rp = os.path.relpath(p, path) ap = to_posix(os.path.join(data_dir, key, rp)) archive_paths.append((ap, p)) if key == 'scripts' and not p.endswith('.exe'): with open(p, 'rb') as f: data = f.read() data = self.process_shebang(data) with open(p, 'wb') as f: f.write(data) # Now, stuff which is in site-packages, other than the # distinfo stuff. path = libdir distinfo = None for root, dirs, files in os.walk(path): if root == path: # At the top level only, save distinfo for later # and skip it for now for i, dn in enumerate(dirs): dn = fsdecode(dn) if dn.endswith('.dist-info'): distinfo = os.path.join(root, dn) del dirs[i] break assert distinfo, '.dist-info directory expected, not found' for fn in files: # comment out next suite to leave .pyc files in if fsdecode(fn).endswith(('.pyc', '.pyo')): continue p = os.path.join(root, fn) rp = to_posix(os.path.relpath(p, path)) archive_paths.append((rp, p)) # Now distinfo. Assumed to be flat, i.e. os.listdir is enough. 
files = os.listdir(distinfo) for fn in files: if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'): p = fsdecode(os.path.join(distinfo, fn)) ap = to_posix(os.path.join(info_dir, fn)) archive_paths.append((ap, p)) wheel_metadata = [ 'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version), 'Generator: distlib %s' % __version__, 'Root-Is-Purelib: %s' % is_pure, ] for pyver, abi, arch in self.tags: wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch)) p = os.path.join(distinfo, 'WHEEL') with open(p, 'w') as f: f.write('\n'.join(wheel_metadata)) ap = to_posix(os.path.join(info_dir, 'WHEEL')) archive_paths.append((ap, p)) # Now, at last, RECORD. # Paths in here are archive paths - nothing else makes sense. self.write_records((distinfo, info_dir), libdir, archive_paths) # Now, ready to build the zip file pathname = os.path.join(self.dirname, self.filename) self.build_zip(pathname, archive_paths) return pathname def install(self, paths, maker, **kwargs): """ Install a wheel to the specified paths. If kwarg ``warner`` is specified, it should be a callable, which will be called with two tuples indicating the wheel version of this software and the wheel version in the file, if there is a discrepancy in the versions. This can be used to issue any warnings to raise any exceptions. If kwarg ``lib_only`` is True, only the purelib/platlib files are installed, and the headers, scripts, data and dist-info metadata are not written. The return value is a :class:`InstalledDistribution` instance unless ``options.lib_only`` is True, in which case the return value is ``None``. 
""" dry_run = maker.dry_run warner = kwargs.get('warner') lib_only = kwargs.get('lib_only', False) pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) data_dir = '%s.data' % name_ver info_dir = '%s.dist-info' % name_ver metadata_name = posixpath.join(info_dir, METADATA_FILENAME) wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') record_name = posixpath.join(info_dir, 'RECORD') wrapper = codecs.getreader('utf-8') with ZipFile(pathname, 'r') as zf: with zf.open(wheel_metadata_name) as bwf: wf = wrapper(bwf) message = message_from_file(wf) wv = message['Wheel-Version'].split('.', 1) file_version = tuple([int(i) for i in wv]) if (file_version != self.wheel_version) and warner: warner(self.wheel_version, file_version) if message['Root-Is-Purelib'] == 'true': libdir = paths['purelib'] else: libdir = paths['platlib'] records = {} with zf.open(record_name) as bf: with CSVReader(stream=bf) as reader: for row in reader: p = row[0] records[p] = row data_pfx = posixpath.join(data_dir, '') info_pfx = posixpath.join(info_dir, '') script_pfx = posixpath.join(data_dir, 'scripts', '') # make a new instance rather than a copy of maker's, # as we mutate it fileop = FileOperator(dry_run=dry_run) fileop.record = True # so we can rollback if needed bc = not sys.dont_write_bytecode # Double negatives. Lovely! 
outfiles = [] # for RECORD writing # for script copying/shebang processing workdir = tempfile.mkdtemp() # set target dir later # we default add_launchers to False, as the # Python Launcher should be used instead maker.source_dir = workdir maker.target_dir = None try: for zinfo in zf.infolist(): arcname = zinfo.filename if isinstance(arcname, text_type): u_arcname = arcname else: u_arcname = arcname.decode('utf-8') # The signature file won't be in RECORD, # and we don't currently don't do anything with it if u_arcname.endswith('/RECORD.jws'): continue row = records[u_arcname] if row[2] and str(zinfo.file_size) != row[2]: raise DistlibException('size mismatch for ' '%s' % u_arcname) if row[1]: kind, value = row[1].split('=', 1) with zf.open(arcname) as bf: data = bf.read() _, digest = self.get_hash(data, kind) if digest != value: raise DistlibException('digest mismatch for ' '%s' % arcname) if lib_only and u_arcname.startswith((info_pfx, data_pfx)): logger.debug('lib_only: skipping %s', u_arcname) continue is_script = (u_arcname.startswith(script_pfx) and not u_arcname.endswith('.exe')) if u_arcname.startswith(data_pfx): _, where, rp = u_arcname.split('/', 2) outfile = os.path.join(paths[where], convert_path(rp)) else: # meant for site-packages. 
if u_arcname in (wheel_metadata_name, record_name): continue outfile = os.path.join(libdir, convert_path(u_arcname)) if not is_script: with zf.open(arcname) as bf: fileop.copy_stream(bf, outfile) outfiles.append(outfile) # Double check the digest of the written file if not dry_run and row[1]: with open(outfile, 'rb') as bf: data = bf.read() _, newdigest = self.get_hash(data, kind) if newdigest != digest: raise DistlibException('digest mismatch ' 'on write for ' '%s' % outfile) if bc and outfile.endswith('.py'): try: pyc = fileop.byte_compile(outfile) outfiles.append(pyc) except Exception: # Don't give up if byte-compilation fails, # but log it and perhaps warn the user logger.warning('Byte-compilation failed', exc_info=True) else: fn = os.path.basename(convert_path(arcname)) workname = os.path.join(workdir, fn) with zf.open(arcname) as bf: fileop.copy_stream(bf, workname) dn, fn = os.path.split(outfile) maker.target_dir = dn filenames = maker.make(fn) fileop.set_executable_mode(filenames) outfiles.extend(filenames) if lib_only: logger.debug('lib_only: returning None') dist = None else: # Generate scripts # Try to get pydist.json so we can see if there are # any commands to generate. If this fails (e.g. because # of a legacy wheel), log a warning but don't give up. 
commands = None file_version = self.info['Wheel-Version'] if file_version == '1.0': # Use legacy info ep = posixpath.join(info_dir, 'entry_points.txt') try: with zf.open(ep) as bwf: epdata = read_exports(bwf) commands = {} for key in ('console', 'gui'): k = '%s_scripts' % key if k in epdata: commands['wrap_%s' % key] = d = {} for v in epdata[k].values(): s = '%s:%s' % (v.prefix, v.suffix) if v.flags: s += ' %s' % v.flags d[v.name] = s except Exception: logger.warning('Unable to read legacy script ' 'metadata, so cannot generate ' 'scripts') else: try: with zf.open(metadata_name) as bwf: wf = wrapper(bwf) commands = json.load(wf).get('extensions') if commands: commands = commands.get('python.commands') except Exception: logger.warning('Unable to read JSON metadata, so ' 'cannot generate scripts') if commands: console_scripts = commands.get('wrap_console', {}) gui_scripts = commands.get('wrap_gui', {}) if console_scripts or gui_scripts: script_dir = paths.get('scripts', '') if not os.path.isdir(script_dir): raise ValueError('Valid script path not ' 'specified') maker.target_dir = script_dir for k, v in console_scripts.items(): script = '%s = %s' % (k, v) filenames = maker.make(script) fileop.set_executable_mode(filenames) if gui_scripts: options = {'gui': True } for k, v in gui_scripts.items(): script = '%s = %s' % (k, v) filenames = maker.make(script, options) fileop.set_executable_mode(filenames) p = os.path.join(libdir, info_dir) dist = InstalledDistribution(p) # Write SHARED paths = dict(paths) # don't change passed in dict del paths['purelib'] del paths['platlib'] paths['lib'] = libdir p = dist.write_shared_locations(paths, dry_run) if p: outfiles.append(p) # Write RECORD dist.write_installed_files(outfiles, paths['prefix'], dry_run) return dist except Exception: # pragma: no cover logger.exception('installation failed.') fileop.rollback() raise finally: shutil.rmtree(workdir) def _get_dylib_cache(self): global cache if cache is None: # Use native string to 
avoid issues on 2.x: see Python #20140. base = os.path.join(get_cache_base(), str('dylib-cache'), sys.version[:3]) cache = Cache(base) return cache def _get_extensions(self): pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) info_dir = '%s.dist-info' % name_ver arcname = posixpath.join(info_dir, 'EXTENSIONS') wrapper = codecs.getreader('utf-8') result = [] with ZipFile(pathname, 'r') as zf: try: with zf.open(arcname) as bf: wf = wrapper(bf) extensions = json.load(wf) cache = self._get_dylib_cache() prefix = cache.prefix_to_dir(pathname) cache_base = os.path.join(cache.base, prefix) if not os.path.isdir(cache_base): os.makedirs(cache_base) for name, relpath in extensions.items(): dest = os.path.join(cache_base, convert_path(relpath)) if not os.path.exists(dest): extract = True else: file_time = os.stat(dest).st_mtime file_time = datetime.datetime.fromtimestamp(file_time) info = zf.getinfo(relpath) wheel_time = datetime.datetime(*info.date_time) extract = wheel_time > file_time if extract: zf.extract(relpath, cache_base) result.append((name, dest)) except KeyError: pass return result def is_compatible(self): """ Determine if a wheel is compatible with the running system. """ return is_compatible(self) def is_mountable(self): """ Determine if a wheel is asserted as mountable by its metadata. """ return True # for now - metadata details TBD def mount(self, append=False): pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) if not self.is_compatible(): msg = 'Wheel %s not compatible with this Python.' % pathname raise DistlibException(msg) if not self.is_mountable(): msg = 'Wheel %s is marked as not mountable.' 
% pathname raise DistlibException(msg) if pathname in sys.path: logger.debug('%s already in path', pathname) else: if append: sys.path.append(pathname) else: sys.path.insert(0, pathname) extensions = self._get_extensions() if extensions: if _hook not in sys.meta_path: sys.meta_path.append(_hook) _hook.add(pathname, extensions) def unmount(self): pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) if pathname not in sys.path: logger.debug('%s not in path', pathname) else: sys.path.remove(pathname) if pathname in _hook.impure_wheels: _hook.remove(pathname) if not _hook.impure_wheels: if _hook in sys.meta_path: sys.meta_path.remove(_hook) def verify(self): pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) data_dir = '%s.data' % name_ver info_dir = '%s.dist-info' % name_ver metadata_name = posixpath.join(info_dir, METADATA_FILENAME) wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') record_name = posixpath.join(info_dir, 'RECORD') wrapper = codecs.getreader('utf-8') with ZipFile(pathname, 'r') as zf: with zf.open(wheel_metadata_name) as bwf: wf = wrapper(bwf) message = message_from_file(wf) wv = message['Wheel-Version'].split('.', 1) file_version = tuple([int(i) for i in wv]) # TODO version verification records = {} with zf.open(record_name) as bf: with CSVReader(stream=bf) as reader: for row in reader: p = row[0] records[p] = row for zinfo in zf.infolist(): arcname = zinfo.filename if isinstance(arcname, text_type): u_arcname = arcname else: u_arcname = arcname.decode('utf-8') if '..' 
in u_arcname: raise DistlibException('invalid entry in ' 'wheel: %r' % u_arcname) # The signature file won't be in RECORD, # and we don't currently don't do anything with it if u_arcname.endswith('/RECORD.jws'): continue row = records[u_arcname] if row[2] and str(zinfo.file_size) != row[2]: raise DistlibException('size mismatch for ' '%s' % u_arcname) if row[1]: kind, value = row[1].split('=', 1) with zf.open(arcname) as bf: data = bf.read() _, digest = self.get_hash(data, kind) if digest != value: raise DistlibException('digest mismatch for ' '%s' % arcname) def update(self, modifier, dest_dir=None, **kwargs): """ Update the contents of a wheel in a generic way. The modifier should be a callable which expects a dictionary argument: its keys are archive-entry paths, and its values are absolute filesystem paths where the contents the corresponding archive entries can be found. The modifier is free to change the contents of the files pointed to, add new entries and remove entries, before returning. This method will extract the entire contents of the wheel to a temporary location, call the modifier, and then use the passed (and possibly updated) dictionary to write a new wheel. If ``dest_dir`` is specified, the new wheel is written there -- otherwise, the original wheel is overwritten. The modifier should return True if it updated the wheel, else False. This method returns the same value the modifier returns. 
""" def get_version(path_map, info_dir): version = path = None key = '%s/%s' % (info_dir, METADATA_FILENAME) if key not in path_map: key = '%s/PKG-INFO' % info_dir if key in path_map: path = path_map[key] version = Metadata(path=path).version return version, path def update_version(version, path): updated = None try: v = NormalizedVersion(version) i = version.find('-') if i < 0: updated = '%s+1' % version else: parts = [int(s) for s in version[i + 1:].split('.')] parts[-1] += 1 updated = '%s+%s' % (version[:i], '.'.join(str(i) for i in parts)) except UnsupportedVersionError: logger.debug('Cannot update non-compliant (PEP-440) ' 'version %r', version) if updated: md = Metadata(path=path) md.version = updated legacy = not path.endswith(METADATA_FILENAME) md.write(path=path, legacy=legacy) logger.debug('Version updated from %r to %r', version, updated) pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) info_dir = '%s.dist-info' % name_ver record_name = posixpath.join(info_dir, 'RECORD') with tempdir() as workdir: with ZipFile(pathname, 'r') as zf: path_map = {} for zinfo in zf.infolist(): arcname = zinfo.filename if isinstance(arcname, text_type): u_arcname = arcname else: u_arcname = arcname.decode('utf-8') if u_arcname == record_name: continue if '..' in u_arcname: raise DistlibException('invalid entry in ' 'wheel: %r' % u_arcname) zf.extract(zinfo, workdir) path = os.path.join(workdir, convert_path(u_arcname)) path_map[u_arcname] = path # Remember the version. original_version, _ = get_version(path_map, info_dir) # Files extracted. Call the modifier. modified = modifier(path_map, **kwargs) if modified: # Something changed - need to build a new wheel. current_version, path = get_version(path_map, info_dir) if current_version and (current_version == original_version): # Add or update local version to signify changes. update_version(current_version, path) # Decide where the new wheel goes. 
if dest_dir is None: fd, newpath = tempfile.mkstemp(suffix='.whl', prefix='wheel-update-', dir=workdir) os.close(fd) else: if not os.path.isdir(dest_dir): raise DistlibException('Not a directory: %r' % dest_dir) newpath = os.path.join(dest_dir, self.filename) archive_paths = list(path_map.items()) distinfo = os.path.join(workdir, info_dir) info = distinfo, info_dir self.write_records(info, workdir, archive_paths) self.build_zip(newpath, archive_paths) if dest_dir is None: shutil.copyfile(newpath, pathname) return modified def compatible_tags(): """ Return (pyver, abi, arch) tuples compatible with this Python. """ versions = [VER_SUFFIX] major = VER_SUFFIX[0] for minor in range(sys.version_info[1] - 1, - 1, -1): versions.append(''.join([major, str(minor)])) abis = [] for suffix, _, _ in imp.get_suffixes(): if suffix.startswith('.abi'): abis.append(suffix.split('.', 2)[1]) abis.sort() if ABI != 'none': abis.insert(0, ABI) abis.append('none') result = [] arches = [ARCH] if sys.platform == 'darwin': m = re.match('(\w+)_(\d+)_(\d+)_(\w+)$', ARCH) if m: name, major, minor, arch = m.groups() minor = int(minor) matches = [arch] if arch in ('i386', 'ppc'): matches.append('fat') if arch in ('i386', 'ppc', 'x86_64'): matches.append('fat3') if arch in ('ppc64', 'x86_64'): matches.append('fat64') if arch in ('i386', 'x86_64'): matches.append('intel') if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'): matches.append('universal') while minor >= 0: for match in matches: s = '%s_%s_%s_%s' % (name, major, minor, match) if s != ARCH: # already there arches.append(s) minor -= 1 # Most specific - our Python version, ABI and arch for abi in abis: for arch in arches: result.append((''.join((IMP_PREFIX, versions[0])), abi, arch)) # where no ABI / arch dependency, but IMP_PREFIX dependency for i, version in enumerate(versions): result.append((''.join((IMP_PREFIX, version)), 'none', 'any')) if i == 0: result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any')) # no IMP_PREFIX, 
ABI or arch dependency for i, version in enumerate(versions): result.append((''.join(('py', version)), 'none', 'any')) if i == 0: result.append((''.join(('py', version[0])), 'none', 'any')) return set(result) COMPATIBLE_TAGS = compatible_tags() del compatible_tags def is_compatible(wheel, tags=None): if not isinstance(wheel, Wheel): wheel = Wheel(wheel) # assume it's a filename result = False if tags is None: tags = COMPATIBLE_TAGS for ver, abi, arch in tags: if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch: result = True break return result PK.e[xEEdistlib/locators.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2012-2015 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # import gzip from io import BytesIO import json import logging import os import posixpath import re try: import threading except ImportError: # pragma: no cover import dummy_threading as threading import zlib from . import DistlibException from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, queue, quote, unescape, string_types, build_opener, HTTPRedirectHandler as BaseRedirectHandler, text_type, Request, HTTPError, URLError) from .database import Distribution, DistributionPath, make_dist from .metadata import Metadata from .util import (cached_property, parse_credentials, ensure_slash, split_filename, get_project_data, parse_requirement, parse_name_and_version, ServerProxy, normalize_name) from .version import get_scheme, UnsupportedVersionError from .wheel import Wheel, is_compatible logger = logging.getLogger(__name__) HASHER_HASH = re.compile('^(\w+)=([a-f0-9]+)') CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I) HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') DEFAULT_INDEX = 'https://pypi.python.org/pypi' def get_all_distribution_names(url=None): """ Return all distribution names known by an index. :param url: The URL of the index. 
:return: A list of all known distribution names. """ if url is None: url = DEFAULT_INDEX client = ServerProxy(url, timeout=3.0) return client.list_packages() class RedirectHandler(BaseRedirectHandler): """ A class to work around a bug in some Python 3.2.x releases. """ # There's a bug in the base version for some 3.2.x # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header # returns e.g. /abc, it bails because it says the scheme '' # is bogus, when actually it should use the request's # URL for the scheme. See Python issue #13696. def http_error_302(self, req, fp, code, msg, headers): # Some servers (incorrectly) return multiple Location headers # (so probably same goes for URI). Use first header. newurl = None for key in ('location', 'uri'): if key in headers: newurl = headers[key] break if newurl is None: return urlparts = urlparse(newurl) if urlparts.scheme == '': newurl = urljoin(req.get_full_url(), newurl) if hasattr(headers, 'replace_header'): headers.replace_header(key, newurl) else: headers[key] = newurl return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, headers) http_error_301 = http_error_303 = http_error_307 = http_error_302 class Locator(object): """ A base class for locators - things that locate distributions. """ source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz') binary_extensions = ('.egg', '.exe', '.whl') excluded_extensions = ('.pdf',) # A list of tags indicating which wheels you want to match. The default # value of None matches against the tags compatible with the running # Python. If you want to match other values, set wheel_tags on a locator # instance to a list of tuples (pyver, abi, arch) which you want to match. wheel_tags = None downloadable_extensions = source_extensions + ('.whl',) def __init__(self, scheme='default'): """ Initialise an instance. :param scheme: Because locators look for most recent versions, they need to know the version scheme to use. 
This specifies the current PEP-recommended scheme - use ``'legacy'`` if you need to support existing distributions on PyPI. """ self._cache = {} self.scheme = scheme # Because of bugs in some of the handlers on some of the platforms, # we use our own opener rather than just using urlopen. self.opener = build_opener(RedirectHandler()) # If get_project() is called from locate(), the matcher instance # is set from the requirement passed to locate(). See issue #18 for # why this can be useful to know. self.matcher = None self.errors = queue.Queue() def get_errors(self): """ Return any errors which have occurred. """ result = [] while not self.errors.empty(): # pragma: no cover try: e = self.errors.get(False) result.append(e) except self.errors.Empty: continue self.errors.task_done() return result def clear_errors(self): """ Clear any errors which may have been logged. """ # Just get the errors and throw them away self.get_errors() def clear_cache(self): self._cache.clear() def _get_scheme(self): return self._scheme def _set_scheme(self, value): self._scheme = value scheme = property(_get_scheme, _set_scheme) def _get_project(self, name): """ For a given project, get a dictionary mapping available versions to Distribution instances. This should be implemented in subclasses. If called from a locate() request, self.matcher will be set to a matcher for the requirement to satisfy, otherwise it will be None. """ raise NotImplementedError('Please implement in the subclass') def get_distribution_names(self): """ Return all the distribution names known to this locator. """ raise NotImplementedError('Please implement in the subclass') def get_project(self, name): """ For a given project, get a dictionary mapping available versions to Distribution instances. This calls _get_project to do all the work, and just implements a caching layer on top. 
""" if self._cache is None: result = self._get_project(name) elif name in self._cache: result = self._cache[name] else: self.clear_errors() result = self._get_project(name) self._cache[name] = result return result def score_url(self, url): """ Give an url a score which can be used to choose preferred URLs for a given project release. """ t = urlparse(url) basename = posixpath.basename(t.path) compatible = True is_wheel = basename.endswith('.whl') if is_wheel: compatible = is_compatible(Wheel(basename), self.wheel_tags) return (t.scheme != 'https', 'pypi.python.org' in t.netloc, is_wheel, compatible, basename) def prefer_url(self, url1, url2): """ Choose one of two URLs where both are candidates for distribution archives for the same version of a distribution (for example, .tar.gz vs. zip). The current implementation favours https:// URLs over http://, archives from PyPI over those from other locations, wheel compatibility (if a wheel) and then the archive name. """ result = url2 if url1: s1 = self.score_url(url1) s2 = self.score_url(url2) if s1 > s2: result = url1 if result != url2: logger.debug('Not replacing %r with %r', url1, url2) else: logger.debug('Replacing %r with %r', url1, url2) return result def split_filename(self, filename, project_name): """ Attempt to split a filename in project name, version and Python version. """ return split_filename(filename, project_name) def convert_url_to_download_info(self, url, project_name): """ See if a URL is a candidate for a download URL for a project (the URL has typically been scraped from an HTML page). If it is, a dictionary is returned with keys "name", "version", "filename" and "url"; otherwise, None is returned. 
""" def same_project(name1, name2): return normalize_name(name1) == normalize_name(name2) result = None scheme, netloc, path, params, query, frag = urlparse(url) if frag.lower().startswith('egg='): logger.debug('%s: version hint in fragment: %r', project_name, frag) m = HASHER_HASH.match(frag) if m: algo, digest = m.groups() else: algo, digest = None, None origpath = path if path and path[-1] == '/': path = path[:-1] if path.endswith('.whl'): try: wheel = Wheel(path) if is_compatible(wheel, self.wheel_tags): if project_name is None: include = True else: include = same_project(wheel.name, project_name) if include: result = { 'name': wheel.name, 'version': wheel.version, 'filename': wheel.filename, 'url': urlunparse((scheme, netloc, origpath, params, query, '')), 'python-version': ', '.join( ['.'.join(list(v[2:])) for v in wheel.pyver]), } except Exception as e: # pragma: no cover logger.warning('invalid path for wheel: %s', path) elif path.endswith(self.downloadable_extensions): path = filename = posixpath.basename(path) for ext in self.downloadable_extensions: if path.endswith(ext): path = path[:-len(ext)] t = self.split_filename(path, project_name) if not t: logger.debug('No match for project/version: %s', path) else: name, version, pyver = t if not project_name or same_project(project_name, name): result = { 'name': name, 'version': version, 'filename': filename, 'url': urlunparse((scheme, netloc, origpath, params, query, '')), #'packagetype': 'sdist', } if pyver: result['python-version'] = pyver break if result and algo: result['%s_digest' % algo] = digest return result def _get_digest(self, info): """ Get a digest from a dictionary by looking at keys of the form 'algo_digest'. Returns a 2-tuple (algo, digest) if found, else None. Currently looks only for SHA256, then MD5. 
""" result = None for algo in ('sha256', 'md5'): key = '%s_digest' % algo if key in info: result = (algo, info[key]) break return result def _update_version_data(self, result, info): """ Update a result dictionary (the final result from _get_project) with a dictionary for a specific version, which typically holds information gleaned from a filename or URL for an archive for the distribution. """ name = info.pop('name') version = info.pop('version') if version in result: dist = result[version] md = dist.metadata else: dist = make_dist(name, version, scheme=self.scheme) md = dist.metadata dist.digest = digest = self._get_digest(info) url = info['url'] result['digests'][url] = digest if md.source_url != info['url']: md.source_url = self.prefer_url(md.source_url, url) result['urls'].setdefault(version, set()).add(url) dist.locator = self result[version] = dist def locate(self, requirement, prereleases=False): """ Find the most recent distribution which matches the given requirement. :param requirement: A requirement of the form 'foo (1.0)' or perhaps 'foo (>= 1.0, < 2.0, != 1.3)' :param prereleases: If ``True``, allow pre-release versions to be located. Otherwise, pre-release versions are not returned. :return: A :class:`Distribution` instance, or ``None`` if no such distribution could be located. 
""" result = None r = parse_requirement(requirement) if r is None: raise DistlibException('Not a valid requirement: %r' % requirement) scheme = get_scheme(self.scheme) self.matcher = matcher = scheme.matcher(r.requirement) logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__) versions = self.get_project(r.name) if len(versions) > 2: # urls and digests keys are present # sometimes, versions are invalid slist = [] vcls = matcher.version_class for k in versions: if k in ('urls', 'digests'): continue try: if not matcher.match(k): logger.debug('%s did not match %r', matcher, k) else: if prereleases or not vcls(k).is_prerelease: slist.append(k) else: logger.debug('skipping pre-release ' 'version %s of %s', k, matcher.name) except Exception: # pragma: no cover logger.warning('error matching %s with %r', matcher, k) pass # slist.append(k) if len(slist) > 1: slist = sorted(slist, key=scheme.key) if slist: logger.debug('sorted list: %s', slist) version = slist[-1] result = versions[version] if result: if r.extras: result.extras = r.extras result.download_urls = versions.get('urls', {}).get(version, set()) d = {} sd = versions.get('digests', {}) for url in result.download_urls: if url in sd: d[url] = sd[url] result.digests = d self.matcher = None return result class PyPIRPCLocator(Locator): """ This locator uses XML-RPC to locate distributions. It therefore cannot be used with simple mirrors (that only mirror file content). """ def __init__(self, url, **kwargs): """ Initialise an instance. :param url: The URL to use for XML-RPC. :param kwargs: Passed to the superclass constructor. """ super(PyPIRPCLocator, self).__init__(**kwargs) self.base_url = url self.client = ServerProxy(url, timeout=3.0) def get_distribution_names(self): """ Return all the distribution names known to this locator. 
""" return set(self.client.list_packages()) def _get_project(self, name): result = {'urls': {}, 'digests': {}} versions = self.client.package_releases(name, True) for v in versions: urls = self.client.release_urls(name, v) data = self.client.release_data(name, v) metadata = Metadata(scheme=self.scheme) metadata.name = data['name'] metadata.version = data['version'] metadata.license = data.get('license') metadata.keywords = data.get('keywords', []) metadata.summary = data.get('summary') dist = Distribution(metadata) if urls: info = urls[0] metadata.source_url = info['url'] dist.digest = self._get_digest(info) dist.locator = self result[v] = dist for info in urls: url = info['url'] digest = self._get_digest(info) result['urls'].setdefault(v, set()).add(url) result['digests'][url] = digest return result class PyPIJSONLocator(Locator): """ This locator uses PyPI's JSON interface. It's very limited in functionality and probably not worth using. """ def __init__(self, url, **kwargs): super(PyPIJSONLocator, self).__init__(**kwargs) self.base_url = ensure_slash(url) def get_distribution_names(self): """ Return all the distribution names known to this locator. 
""" raise NotImplementedError('Not available from this locator') def _get_project(self, name): result = {'urls': {}, 'digests': {}} url = urljoin(self.base_url, '%s/json' % quote(name)) try: resp = self.opener.open(url) data = resp.read().decode() # for now d = json.loads(data) md = Metadata(scheme=self.scheme) data = d['info'] md.name = data['name'] md.version = data['version'] md.license = data.get('license') md.keywords = data.get('keywords', []) md.summary = data.get('summary') dist = Distribution(md) dist.locator = self urls = d['urls'] result[md.version] = dist for info in d['urls']: url = info['url'] dist.download_urls.add(url) dist.digests[url] = self._get_digest(info) result['urls'].setdefault(md.version, set()).add(url) result['digests'][url] = self._get_digest(info) # Now get other releases for version, infos in d['releases'].items(): if version == md.version: continue # already done omd = Metadata(scheme=self.scheme) omd.name = md.name omd.version = version odist = Distribution(omd) odist.locator = self result[version] = odist for info in infos: url = info['url'] odist.download_urls.add(url) odist.digests[url] = self._get_digest(info) result['urls'].setdefault(version, set()).add(url) result['digests'][url] = self._get_digest(info) # for info in urls: # md.source_url = info['url'] # dist.digest = self._get_digest(info) # dist.locator = self # for info in urls: # url = info['url'] # result['urls'].setdefault(md.version, set()).add(url) # result['digests'][url] = self._get_digest(info) except Exception as e: self.errors.put(text_type(e)) logger.exception('JSON fetch failed: %s', e) return result class Page(object): """ This class represents a scraped HTML page. """ # The following slightly hairy-looking regex just looks for the contents of # an anchor link, which has an attribute "href" either immediately preceded # or immediately followed by a "rel" attribute. 
The attribute values can be # declared with double quotes, single quotes or no quotes - which leads to # the length of the expression. _href = re.compile(""" (rel\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s\n]*))\s+)? href\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s\n]*)) (\s+rel\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s\n]*)))? """, re.I | re.S | re.X) _base = re.compile(r"""]+)""", re.I | re.S) def __init__(self, data, url): """ Initialise an instance with the Unicode page contents and the URL they came from. """ self.data = data self.base_url = self.url = url m = self._base.search(self.data) if m: self.base_url = m.group(1) _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) @cached_property def links(self): """ Return the URLs of all the links on a page together with information about their "rel" attribute, for determining which ones to treat as downloads and which ones to queue for further scraping. """ def clean(url): "Tidy up an URL." scheme, netloc, path, params, query, frag = urlparse(url) return urlunparse((scheme, netloc, quote(path), params, query, frag)) result = set() for match in self._href.finditer(self.data): d = match.groupdict('') rel = (d['rel1'] or d['rel2'] or d['rel3'] or d['rel4'] or d['rel5'] or d['rel6']) url = d['url1'] or d['url2'] or d['url3'] url = urljoin(self.base_url, url) url = unescape(url) url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url) result.add((url, rel)) # We sort the result, hoping to bring the most recent versions # to the front result = sorted(result, key=lambda t: t[0], reverse=True) return result class SimpleScrapingLocator(Locator): """ A locator which scrapes HTML pages to locate downloads for a distribution. This runs multiple threads to do the I/O; performance is at least as good as pip's PackageFinder, which works in an analogous fashion. """ # These are used to deal with various Content-Encoding schemes. 
decoders = { 'deflate': zlib.decompress, 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(d)).read(), 'none': lambda b: b, } def __init__(self, url, timeout=None, num_workers=10, **kwargs): """ Initialise an instance. :param url: The root URL to use for scraping. :param timeout: The timeout, in seconds, to be applied to requests. This defaults to ``None`` (no timeout specified). :param num_workers: The number of worker threads you want to do I/O, This defaults to 10. :param kwargs: Passed to the superclass. """ super(SimpleScrapingLocator, self).__init__(**kwargs) self.base_url = ensure_slash(url) self.timeout = timeout self._page_cache = {} self._seen = set() self._to_fetch = queue.Queue() self._bad_hosts = set() self.skip_externals = False self.num_workers = num_workers self._lock = threading.RLock() # See issue #45: we need to be resilient when the locator is used # in a thread, e.g. with concurrent.futures. We can't use self._lock # as it is for coordinating our internal threads - the ones created # in _prepare_threads. self._gplock = threading.RLock() def _prepare_threads(self): """ Threads are created only when get_project is called, and terminate before it returns. They are there primarily to parallelise I/O (i.e. fetching web pages). """ self._threads = [] for i in range(self.num_workers): t = threading.Thread(target=self._fetch) t.setDaemon(True) t.start() self._threads.append(t) def _wait_threads(self): """ Tell all the threads to terminate (by sending a sentinel value) and wait for them to do so. 
""" # Note that you need two loops, since you can't say which # thread will get each sentinel for t in self._threads: self._to_fetch.put(None) # sentinel for t in self._threads: t.join() self._threads = [] def _get_project(self, name): result = {'urls': {}, 'digests': {}} with self._gplock: self.result = result self.project_name = name url = urljoin(self.base_url, '%s/' % quote(name)) self._seen.clear() self._page_cache.clear() self._prepare_threads() try: logger.debug('Queueing %s', url) self._to_fetch.put(url) self._to_fetch.join() finally: self._wait_threads() del self.result return result platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|' r'win(32|-amd64)|macosx-?\d+)\b', re.I) def _is_platform_dependent(self, url): """ Does an URL refer to a platform-specific download? """ return self.platform_dependent.search(url) def _process_download(self, url): """ See if an URL is a suitable download for a project. If it is, register information in the result dictionary (for _get_project) about the specific version it's for. Note that the return value isn't actually used other than as a boolean value. """ if self._is_platform_dependent(url): info = None else: info = self.convert_url_to_download_info(url, self.project_name) logger.debug('process_download: %s -> %s', url, info) if info: with self._lock: # needed because self.result is shared self._update_version_data(self.result, info) return info def _should_queue(self, link, referrer, rel): """ Determine whether a link URL from a referring page and with a particular "rel" attribute should be queued for scraping. 
""" scheme, netloc, path, _, _, _ = urlparse(link) if path.endswith(self.source_extensions + self.binary_extensions + self.excluded_extensions): result = False elif self.skip_externals and not link.startswith(self.base_url): result = False elif not referrer.startswith(self.base_url): result = False elif rel not in ('homepage', 'download'): result = False elif scheme not in ('http', 'https', 'ftp'): result = False elif self._is_platform_dependent(link): result = False else: host = netloc.split(':', 1)[0] if host.lower() == 'localhost': result = False else: result = True logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, referrer, result) return result def _fetch(self): """ Get a URL to fetch from the work queue, get the HTML page, examine its links for download candidates and candidates for further scraping. This is a handy method to run in a thread. """ while True: url = self._to_fetch.get() try: if url: page = self.get_page(url) if page is None: # e.g. after an error continue for link, rel in page.links: if link not in self._seen: self._seen.add(link) if (not self._process_download(link) and self._should_queue(link, url, rel)): logger.debug('Queueing %s from %s', link, url) self._to_fetch.put(link) except Exception as e: # pragma: no cover self.errors.put(text_type(e)) finally: # always do this, to avoid hangs :-) self._to_fetch.task_done() if not url: #logger.debug('Sentinel seen, quitting.') break def get_page(self, url): """ Get the HTML for an URL, possibly from an in-memory cache. XXX TODO Note: this cache is never actually cleared. It's assumed that the data won't get stale over the lifetime of a locator instance (not necessarily true for the default_locator). 
""" # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api scheme, netloc, path, _, _, _ = urlparse(url) if scheme == 'file' and os.path.isdir(url2pathname(path)): url = urljoin(ensure_slash(url), 'index.html') if url in self._page_cache: result = self._page_cache[url] logger.debug('Returning %s from cache: %s', url, result) else: host = netloc.split(':', 1)[0] result = None if host in self._bad_hosts: logger.debug('Skipping %s due to bad host %s', url, host) else: req = Request(url, headers={'Accept-encoding': 'identity'}) try: logger.debug('Fetching %s', url) resp = self.opener.open(req, timeout=self.timeout) logger.debug('Fetched %s', url) headers = resp.info() content_type = headers.get('Content-Type', '') if HTML_CONTENT_TYPE.match(content_type): final_url = resp.geturl() data = resp.read() encoding = headers.get('Content-Encoding') if encoding: decoder = self.decoders[encoding] # fail if not found data = decoder(data) encoding = 'utf-8' m = CHARSET.search(content_type) if m: encoding = m.group(1) try: data = data.decode(encoding) except UnicodeError: # pragma: no cover data = data.decode('latin-1') # fallback result = Page(data, final_url) self._page_cache[final_url] = result except HTTPError as e: if e.code != 404: logger.exception('Fetch failed: %s: %s', url, e) except URLError as e: # pragma: no cover logger.exception('Fetch failed: %s: %s', url, e) with self._lock: self._bad_hosts.add(host) except Exception as e: # pragma: no cover logger.exception('Fetch failed: %s: %s', url, e) finally: self._page_cache[url] = result # even if None (failure) return result _distname_re = re.compile(']*>([^<]+)<') def get_distribution_names(self): """ Return all the distribution names known to this locator. 
""" result = set() page = self.get_page(self.base_url) if not page: raise DistlibException('Unable to get %s' % self.base_url) for match in self._distname_re.finditer(page.data): result.add(match.group(1)) return result class DirectoryLocator(Locator): """ This class locates distributions in a directory tree. """ def __init__(self, path, **kwargs): """ Initialise an instance. :param path: The root of the directory tree to search. :param kwargs: Passed to the superclass constructor, except for: * recursive - if True (the default), subdirectories are recursed into. If False, only the top-level directory is searched, """ self.recursive = kwargs.pop('recursive', True) super(DirectoryLocator, self).__init__(**kwargs) path = os.path.abspath(path) if not os.path.isdir(path): # pragma: no cover raise DistlibException('Not a directory: %r' % path) self.base_dir = path def should_include(self, filename, parent): """ Should a filename be considered as a candidate for a distribution archive? As well as the filename, the directory which contains it is provided, though not used by the current implementation. """ return filename.endswith(self.downloadable_extensions) def _get_project(self, name): result = {'urls': {}, 'digests': {}} for root, dirs, files in os.walk(self.base_dir): for fn in files: if self.should_include(fn, root): fn = os.path.join(root, fn) url = urlunparse(('file', '', pathname2url(os.path.abspath(fn)), '', '', '')) info = self.convert_url_to_download_info(url, name) if info: self._update_version_data(result, info) if not self.recursive: break return result def get_distribution_names(self): """ Return all the distribution names known to this locator. 
""" result = set() for root, dirs, files in os.walk(self.base_dir): for fn in files: if self.should_include(fn, root): fn = os.path.join(root, fn) url = urlunparse(('file', '', pathname2url(os.path.abspath(fn)), '', '', '')) info = self.convert_url_to_download_info(url, None) if info: result.add(info['name']) if not self.recursive: break return result class JSONLocator(Locator): """ This locator uses special extended metadata (not available on PyPI) and is the basis of performant dependency resolution in distlib. Other locators require archive downloads before dependencies can be determined! As you might imagine, that can be slow. """ def get_distribution_names(self): """ Return all the distribution names known to this locator. """ raise NotImplementedError('Not available from this locator') def _get_project(self, name): result = {'urls': {}, 'digests': {}} data = get_project_data(name) if data: for info in data.get('files', []): if info['ptype'] != 'sdist' or info['pyversion'] != 'source': continue # We don't store summary in project metadata as it makes # the data bigger for no benefit during dependency # resolution dist = make_dist(data['name'], info['version'], summary=data.get('summary', 'Placeholder for summary'), scheme=self.scheme) md = dist.metadata md.source_url = info['url'] # TODO SHA256 digest if 'digest' in info and info['digest']: dist.digest = ('md5', info['digest']) md.dependencies = info.get('requirements', {}) dist.exports = info.get('exports', {}) result[dist.version] = dist result['urls'].setdefault(dist.version, set()).add(info['url']) return result class DistPathLocator(Locator): """ This locator finds installed distributions in a path. It can be useful for adding to an :class:`AggregatingLocator`. """ def __init__(self, distpath, **kwargs): """ Initialise an instance. :param distpath: A :class:`DistributionPath` instance to search. 
""" super(DistPathLocator, self).__init__(**kwargs) assert isinstance(distpath, DistributionPath) self.distpath = distpath def _get_project(self, name): dist = self.distpath.get_distribution(name) if dist is None: result = {'urls': {}, 'digests': {}} else: result = { dist.version: dist, 'urls': {dist.version: set([dist.source_url])}, 'digests': {dist.version: set([None])} } return result class AggregatingLocator(Locator): """ This class allows you to chain and/or merge a list of locators. """ def __init__(self, *locators, **kwargs): """ Initialise an instance. :param locators: The list of locators to search. :param kwargs: Passed to the superclass constructor, except for: * merge - if False (the default), the first successful search from any of the locators is returned. If True, the results from all locators are merged (this can be slow). """ self.merge = kwargs.pop('merge', False) self.locators = locators super(AggregatingLocator, self).__init__(**kwargs) def clear_cache(self): super(AggregatingLocator, self).clear_cache() for locator in self.locators: locator.clear_cache() def _set_scheme(self, value): self._scheme = value for locator in self.locators: locator.scheme = value scheme = property(Locator.scheme.fget, _set_scheme) def _get_project(self, name): result = {} for locator in self.locators: d = locator.get_project(name) if d: if self.merge: files = result.get('urls', {}) digests = result.get('digests', {}) # next line could overwrite result['urls'], result['digests'] result.update(d) df = result.get('urls') if files and df: for k, v in files.items(): if k in df: df[k] |= v else: df[k] = v dd = result.get('digests') if digests and dd: dd.update(digests) else: # See issue #18. If any dists are found and we're looking # for specific constraints, we only return something if # a match is found. 
For example, if a DirectoryLocator # returns just foo (1.0) while we're looking for # foo (>= 2.0), we'll pretend there was nothing there so # that subsequent locators can be queried. Otherwise we # would just return foo (1.0) which would then lead to a # failure to find foo (>= 2.0), because other locators # weren't searched. Note that this only matters when # merge=False. if self.matcher is None: found = True else: found = False for k in d: if self.matcher.match(k): found = True break if found: result = d break return result def get_distribution_names(self): """ Return all the distribution names known to this locator. """ result = set() for locator in self.locators: try: result |= locator.get_distribution_names() except NotImplementedError: pass return result # We use a legacy scheme simply because most of the dists on PyPI use legacy # versions which don't conform to PEP 426 / PEP 440. default_locator = AggregatingLocator( JSONLocator(), SimpleScrapingLocator('https://pypi.python.org/simple/', timeout=3.0), scheme='legacy') locate = default_locator.locate NAME_VERSION_RE = re.compile(r'(?P[\w-]+)\s*' r'\(\s*(==\s*)?(?P[^)]+)\)$') class DependencyFinder(object): """ Locate dependencies for distributions. """ def __init__(self, locator=None): """ Initialise an instance, using the specified locator to locate distributions. """ self.locator = locator or default_locator self.scheme = get_scheme(self.locator.scheme) def add_distribution(self, dist): """ Add a distribution to the finder. This will update internal information about who provides what. :param dist: The distribution to add. 
""" logger.debug('adding distribution %s', dist) name = dist.key self.dists_by_name[name] = dist self.dists[(name, dist.version)] = dist for p in dist.provides: name, version = parse_name_and_version(p) logger.debug('Add to provided: %s, %s, %s', name, version, dist) self.provided.setdefault(name, set()).add((version, dist)) def remove_distribution(self, dist): """ Remove a distribution from the finder. This will update internal information about who provides what. :param dist: The distribution to remove. """ logger.debug('removing distribution %s', dist) name = dist.key del self.dists_by_name[name] del self.dists[(name, dist.version)] for p in dist.provides: name, version = parse_name_and_version(p) logger.debug('Remove from provided: %s, %s, %s', name, version, dist) s = self.provided[name] s.remove((version, dist)) if not s: del self.provided[name] def get_matcher(self, reqt): """ Get a version matcher for a requirement. :param reqt: The requirement :type reqt: str :return: A version matcher (an instance of :class:`distlib.version.Matcher`). """ try: matcher = self.scheme.matcher(reqt) except UnsupportedVersionError: # pragma: no cover # XXX compat-mode if cannot read the version name = reqt.split()[0] matcher = self.scheme.matcher(name) return matcher def find_providers(self, reqt): """ Find the distributions which can fulfill a requirement. :param reqt: The requirement. :type reqt: str :return: A set of distribution which can fulfill the requirement. """ matcher = self.get_matcher(reqt) name = matcher.key # case-insensitive result = set() provided = self.provided if name in provided: for version, provider in provided[name]: try: match = matcher.match(version) except UnsupportedVersionError: match = False if match: result.add(provider) break return result def try_to_replace(self, provider, other, problems): """ Attempt to replace one provider with another. This is typically used when resolving dependencies from multiple sources, e.g. 
A requires (B >= 1.0) while C requires (B >= 1.1). For successful replacement, ``provider`` must meet all the requirements which ``other`` fulfills. :param provider: The provider we are trying to replace with. :param other: The provider we're trying to replace. :param problems: If False is returned, this will contain what problems prevented replacement. This is currently a tuple of the literal string 'cantreplace', ``provider``, ``other`` and the set of requirements that ``provider`` couldn't fulfill. :return: True if we can replace ``other`` with ``provider``, else False. """ rlist = self.reqts[other] unmatched = set() for s in rlist: matcher = self.get_matcher(s) if not matcher.match(provider.version): unmatched.add(s) if unmatched: # can't replace other with provider problems.add(('cantreplace', provider, other, frozenset(unmatched))) result = False else: # can replace other with provider self.remove_distribution(other) del self.reqts[other] for s in rlist: self.reqts.setdefault(provider, set()).add(s) self.add_distribution(provider) result = True return result def find(self, requirement, meta_extras=None, prereleases=False): """ Find a distribution and all distributions it depends on. :param requirement: The requirement specifying the distribution to find, or a Distribution instance. :param meta_extras: A list of meta extras such as :test:, :build: and so on. :param prereleases: If ``True``, allow pre-release versions to be returned - otherwise, don't return prereleases unless they're all that's available. Return a set of :class:`Distribution` instances and a set of problems. The distributions returned should be such that they have the :attr:`required` attribute set to ``True`` if they were from the ``requirement`` passed to ``find()``, and they have the :attr:`build_time_dependency` attribute set to ``True`` unless they are post-installation dependencies of the ``requirement``. 
The problems should be a tuple consisting of the string ``'unsatisfied'`` and the requirement which couldn't be satisfied by any distribution known to the locator. """ self.provided = {} self.dists = {} self.dists_by_name = {} self.reqts = {} meta_extras = set(meta_extras or []) if ':*:' in meta_extras: meta_extras.remove(':*:') # :meta: and :run: are implicitly included meta_extras |= set([':test:', ':build:', ':dev:']) if isinstance(requirement, Distribution): dist = odist = requirement logger.debug('passed %s as requirement', odist) else: dist = odist = self.locator.locate(requirement, prereleases=prereleases) if dist is None: raise DistlibException('Unable to locate %r' % requirement) logger.debug('located %s', odist) dist.requested = True problems = set() todo = set([dist]) install_dists = set([odist]) while todo: dist = todo.pop() name = dist.key # case-insensitive if name not in self.dists_by_name: self.add_distribution(dist) else: #import pdb; pdb.set_trace() other = self.dists_by_name[name] if other != dist: self.try_to_replace(dist, other, problems) ireqts = dist.run_requires | dist.meta_requires sreqts = dist.build_requires ereqts = set() if dist in install_dists: for key in ('test', 'build', 'dev'): e = ':%s:' % key if e in meta_extras: ereqts |= getattr(dist, '%s_requires' % key) all_reqts = ireqts | sreqts | ereqts for r in all_reqts: providers = self.find_providers(r) if not providers: logger.debug('No providers found for %r', r) provider = self.locator.locate(r, prereleases=prereleases) # If no provider is found and we didn't consider # prereleases, consider them now. 
if provider is None and not prereleases: provider = self.locator.locate(r, prereleases=True) if provider is None: logger.debug('Cannot satisfy %r', r) problems.add(('unsatisfied', r)) else: n, v = provider.key, provider.version if (n, v) not in self.dists: todo.add(provider) providers.add(provider) if r in ireqts and dist in install_dists: install_dists.add(provider) logger.debug('Adding %s to install_dists', provider.name_and_version) for p in providers: name = p.key if name not in self.dists_by_name: self.reqts.setdefault(p, set()).add(r) else: other = self.dists_by_name[name] if other != p: # see if other can be replaced by p self.try_to_replace(p, other, problems) dists = set(self.dists.values()) for dist in dists: dist.build_time_dependency = dist not in install_dists if dist.build_time_dependency: logger.debug('%s is a build-time dependency only.', dist.name_and_version) logger.debug('find done for %s', odist) return dists, problems PK.e[7JJdistlib/metadata.pycnu[ abc@sdZddlmZddlZddlmZddlZddlZddlZddl m Z m Z ddl m Z mZmZddlmZdd lmZmZdd lmZmZejeZd e fd YZd e fdYZde fdYZde fdYZdddgZdZ dZ!ej"dZ#ej"dZ$ddddddd d!d"d#d$f Z%ddddd%ddd d!d"d#d$d&d'd(d)d*fZ&d(d)d*d&d'fZ'ddddd%ddd d!d"d#d+d,d$d&d'd-d.d/d0d1d2fZ(d/d0d1d-d2d+d,d.fZ)ddddd%ddd d!d"d#d+d,d$d&d'd-d.d/d0d1d2d3d4d5d6d7fZ*d3d7d4d5d6fZ+e,Z-e-j.e%e-j.e&e-j.e(e-j.e*ej"d8Z/d9Z0d:Z1idd;6dd<6dd=6dd>6d%d?6dd@6ddA6d dB6d!dC6d"dD6d#dE6d+dF6d,dG6d$dH6d&dI6d'dJ6d-dK6d/dL6d0dM6d5dN6d1dO6d2dP6d*dQ6d)dR6d(dS6d.dT6d3dU6d4dV6d6dW6d7dX6Z2d0d-d/fZ3d1fZ4dfZ5dd&d(d*d)d-d/d0d2d.d%d5d7d6fZ6d.fZ7d fZ8d"d+ddfZ9e:Z;ej"dYZ<e=dZZ>d[e:fd\YZ?d]Z@d^ZAd_e:fd`YZBdS(auImplementation of the Metadata for Python packages PEPs. Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental). 
i(tunicode_literalsN(tmessage_from_filei(tDistlibExceptiont __version__(tStringIOt string_typest text_type(t interpret(textract_by_keyt get_extras(t get_schemetPEP440_VERSION_REtMetadataMissingErrorcBseZdZRS(uA required metadata is missing(t__name__t __module__t__doc__(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR stMetadataConflictErrorcBseZdZRS(u>Attempt to read or write metadata fields that are conflictual.(R RR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR st MetadataUnrecognizedVersionErrorcBseZdZRS(u Unknown metadata version number.(R RR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR$stMetadataInvalidErrorcBseZdZRS(uA metadata value is invalid(R RR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR(suMetadatauPKG_INFO_ENCODINGuPKG_INFO_PREFERRED_VERSIONuutf-8u1.1u \|u uMetadata-VersionuNameuVersionuPlatformuSummaryu DescriptionuKeywordsu Home-pageuAuthoru Author-emailuLicenseuSupported-Platformu Classifieru Download-URLu ObsoletesuProvidesuRequiresu MaintaineruMaintainer-emailuObsoletes-Distu Project-URLu Provides-Distu Requires-DistuRequires-PythonuRequires-ExternaluPrivate-Versionu Obsoleted-ByuSetup-Requires-Distu ExtensionuProvides-Extrau"extra\s*==\s*("([^"]+)"|'([^']+)')cCsP|dkrtS|dkr tS|dkr0tS|dkr@tSt|dS(Nu1.0u1.1u1.2u2.0(t _241_FIELDSt _314_FIELDSt _345_FIELDSt _426_FIELDSR(tversion((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt_version2fieldlistgs    c Csd}g}xB|jD]4\}}|gdd fkrCqn|j|qWddddg}x|D]}|tkrd|kr|jdn|tkrd|kr|jdn|tkrd|kr|jdn|tkrmd|krm|jdqmqmWt|dkr1|dSt|dkrRt d nd|koj||t }d|ko||t }d|ko||t }t |t |t |dkrt d n| r| r| rt|krtSn|r dS|rdSdS( u5Detect the best version depending on the fields used.cSs%x|D]}||krtSqWtS(N(tTruetFalse(tkeystmarkerstmarker((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt _has_markerus  uUNKNOWNu1.0u1.1u1.2u2.0iiuUnknown metadata setu(You used incompatible 1.1/1.2/2.0 
fieldsN(titemstNonetappendRtremoveRRRtlenRt _314_MARKERSt _345_MARKERSt _426_MARKERStinttPKG_INFO_PREFERRED_VERSION( tfieldsRRtkeytvaluetpossible_versionstis_1_1tis_1_2tis_2_0((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt _best_versionssB  & umetadata_versionunameuversionuplatformusupported_platformusummaryu descriptionukeywordsu home_pageuauthoru author_emailu maintainerumaintainer_emailulicenseu classifieru download_urluobsoletes_distu provides_distu requires_distusetup_requires_disturequires_pythonurequires_externalurequiresuprovidesu obsoletesu project_urluprivate_versionu obsoleted_byu extensionuprovides_extrau[^A-Za-z0-9.]+cCsG|r9tjd|}tjd|jdd}nd||fS(uhReturn the distribution name with version. If for_filename is true, return a filename-escaped form.u-u u.u%s-%s(t _FILESAFEtsubtreplace(tnameRt for_filename((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt_get_name_and_versions!tLegacyMetadatacBs4eZdZdddddZdZdZdZdZdZ dZ d Z d Z d Z d Zed ZdZdZdZdZedZedZddZdZedZedZedZdZdZdZdZ dZ!dZ"RS( uaThe legacy metadata of a release. Supports versions 1.0, 1.1 and 1.2 (auto-detected). 
You can instantiate the class with one of these arguments (or none): - *path*, the path to a metadata file - *fileobj* give a file-like object with metadata as content - *mapping* is a dict-like object - *scheme* is a version scheme name udefaultcCs|||gjddkr-tdni|_g|_d|_||_|dk rm|j|nB|dk r|j|n&|dk r|j ||j ndS(Niu'path, fileobj and mapping are exclusive( tcountR t TypeErrort_fieldstrequires_filest _dependenciestschemetreadt read_filetupdatetset_metadata_version(tselftpathtfileobjtmappingR=((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt__init__s        cCst|j|jdJscCst|}|d|jdtj|ddd}z|j||Wd|jXdS(u&Write the metadata fields to filepath.uwRbuutf-8N(RcRdt write_fileRe(RBRft skip_unknownRg((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyRGhscCs<|jx+t|dD]}|j|}|rT|dgdgfkrTqn|tkr|j||dj|qn|tkr|dkr|jd kr|jdd}q|jdd }n|g}n|t krg|D]}dj|^q}nx!|D]}|j|||qWqWd S( u0Write the PKG-INFO format data to a file object.uMetadata-VersionuUNKNOWNu,u Descriptionu1.0u1.1u u u |N(u1.0u1.1( RARRIRVRHtjoinRURXR3Ri(RBt fileobjectRqRnRoR+((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyRpps$      % c sfd}|sn^t|drRxL|jD]}||||q4Wn$x!|D]\}}|||qYW|rx*|jD]\}}|||qWndS(uSet metadata values from the given iterable `other` and kwargs. Behavior is like `dict.update`: If `other` has a ``keys`` method, they are looped over and ``self[key]`` is assigned ``other[key]``. Else, ``other`` is an iterable of ``(key, value)`` iterables. Keys that don't match a metadata field or that have an empty value are dropped. 
cs2|tkr.|r.jj||ndS(N(RTRKRM(R*R+(RB(s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt_setsukeysN(thasattrRR(RBtothertkwargsRttktv((RBs@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR@s cCs|j|}|tks'|dkrt|ttf rt|trwg|jdD]}|j^q\}qg}nF|tkrt|ttf rt|tr|g}qg}nt j t j r|d}t |j}|tkrR|d k rRx|D];}|j|jddst jd|||qqWq|tkr|d k r|j|st jd|||qq|tkr|d k r|j|st jd|||qqn|tkr|dkr|j|}qn||j|d?d@f }i}x;|D]3\}}| sf||jkrD|||||D]3\}}| sk||jkrI||||(t __class__R R4R(RB((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt__repr__msN(#R RRR RFRARHRJRLRPRQRMRWR[R]RR_R`RaR>R?RGRpR@RKRRIRRRRRRoRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR7s>                     ,  , ;    u pydist.jsonu metadata.jsontMetadatacBseZdZejdZejdejZeZ ejdZ dZ de Z id>d6d?d6d@d 6Zd Zd ZiedAfd 6edBfd6e dCfd6e dDfd 6ZdEZdFdFdFddZedGZdFefZdFefZi defd6defd6ed6ed6ed6defd6ed6ed6ed6ed 6d!efd"6dHd$6dId 6Z[[d&ZdFd'Zd(Zed)Z ed*Z!e!j"d+Z!dFdFd,Z#ed-Z$ed.Z%e%j"d/Z%d0Z&d1Z'd2Z(d3Z)id4d6d5d6d6d6d7d 6d8d96d!d"6Z*d:Z+dFdFe,e-d;Z.d<Z/d=Z0RS(Ju The metadata of a release. This implementation uses 2.0 (JSON) metadata where possible. If not possible, it wraps a LegacyMetadata instance which handles the key-value metadata format. 
u ^\d+(\.\d+)*$u!^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$u .{1,2047}u2.0u distlib (%s)unameuversionulegacyusummaryuqname version license summary description author author_email keywords platform home_page classifiers download_urluwextras run_requires test_requires build_requires dev_requires provides meta_requires obsoleted_by supports_environmentsumetadata_versionu_legacyu_datauschemeudefaultcCs|||gjddkr-tdnd|_d|_||_|dk ry|j||||_Wqtk rtd|d||_|j qXnd}|rt |d}|j }WdQXn|r|j }n|dkri|j d6|j d6|_nt|ts?|jd}ny)tj||_|j|j|Wn9tk rtd t|d||_|j nXdS( Niu'path, fileobj and mapping are exclusiveRER=urbumetadata_versionu generatoruutf-8RD(R8R R9t_legacyt_dataR=t_validate_mappingRR7tvalidateRdR>tMETADATA_VERSIONt GENERATORRzRtdecodetjsontloadst ValueErrorR(RBRCRDRER=Rtf((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyRFs>          ulicenseukeywordsu Requires-Distu run_requiresuSetup-Requires-Distubuild_requiresu dev_requiresu test_requiresu meta_requiresuProvides-Extrauextrasumodulesu namespacesuexportsucommandsu Classifieru classifiersu Download-URLu source_urluMetadata-Versionc Cstj|d}tj|d}||kr||\}}|jr|dkrs|dkrgdn|}q|jj|}q|dkrdn|}|d kr|jj||}qt}|}|jjd} | r|dkr| jd |}q|dkrH| jd } | r| j||}qq| jd } | sr|jjd } n| r| j||}qn||kr|}qnQ||krtj||}n0|jr|jj|}n|jj|}|S( Nu common_keysu mapped_keysucommandsuexportsumodulesu namespacesu classifiersu extensionsupython.commandsupython.detailsupython.exports(ucommandsuexportsumodulesu namespacesu classifiers(tobjectt__getattribute__RR RIR( RBR*tcommontmappedtlktmakertresultR+tsentineltd((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyRsF           cCso||jkrk|j|\}}|p.|j|krk|j|}|shtd||fqhqkndS(Nu.'%s' is an invalid value for the '%s' property(tSYNTAX_VALIDATORSR=tmatchR(RBR*R+R=tpatternt exclusionstm((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt_validate_valuescCs|j||tj|d}tj|d}||kr||\}}|jr~|dkrntn||j|               
cCst|j|jtS(N(R6R4RR(RB((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pytname_and_version@scCsd|jr|jd}n|jjdg}d|j|jf}||kr`|j|n|S(Nu Provides-Distuprovidesu%s (%s)(RRRR4RR!(RBRts((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pytprovidesDs  cCs*|jr||jd}||krL|dkrsd }n|}||||d|kr>|}Pq>q>W|dkri|d6}|jd|n*t|dt|B}t||d(R4RRR RX(RBR4R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyR(s (((ulegacy((ulegacy(ulegacy(ulegacy(u_legacyu_datauschemeN(unameuversionulicenseukeywordsusummary(u Download-URLN(uMetadata-VersionN(1R RRtretcompiletMETADATA_VERSION_MATCHERtIt NAME_MATCHERR tVERSION_MATCHERtSUMMARY_MATCHERRRRRRRRt __slots__R RFRKt common_keysR{t none_listtdictt none_dictt mapped_keysRRRtpropertyRRtsetterRRRRRRRRRRRRGRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyRvs       ,         + ' *     % (CRt __future__RRctemailRRRRtRRtcompatRRRRRtutilRR RR R t getLoggerR R}R RRRt__all__tPKG_INFO_ENCODINGR(RRZRYRRR$RR%RR&RKRRR@tEXTRA_RERR0RTRRRRURiRVRRRR1RR6R7tMETADATA_FILENAMEtWHEEL_METADATA_FILENAMER(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyt s                                         8            PK.e[F22distlib/util.pyonu[ abc@sddlZddlmZddlZddlZddlmZddlZddl Z ddl Z ddl Z ddl Z ddl Z ddlZddlZyddlZWnek rdZnXddlZddlZddlZddlZddlZyddlZWnek r9ddlZnXddlZddlmZddlmZmZmZm Z m!Z!m"Z"m#Z#m$Z$m%Z%m&Z&m'Z'm(Z(m)Z)m*Z*m+Z+m,Z,m-Z-m.Z.m/Z/m0Z0e j1e2Z3dZ4e j5e4Z6dZ7d e7d Z8e7d Z9d Z:d e:de9de4d e:de9dZ;dZ<de;de<de;dZ=e8d e4e8dZ>de>dZ?de7de?de=dZ@e j5e@ZAde:de9d ZBe j5eBZCdZDd ZEd!ZFd"ZGddd#ZHd$ZId%ZJd&ZKejLd'ZMejLd(ZNejLd)d*ZOd+ePfd,YZQd-ZRd.ePfd/YZSd0ZTd1ePfd2YZUe j5d3e jVZWd4ZXdd5ZYd6ZZd7Z[d8Z\d9Z]d:Z^e j5d;e j_Z`e j5d<Zadd=Zbe j5d>Zcd?Zdd@ZedAZfdBZgdCZhdDZidEePfdFYZjdGePfdHYZkdIePfdJYZldZmdendRZodSZpdZqdZePfd[YZre j5d\Zse j5d]Zte j5d^Zud_Zd`ZverddalmwZxmyZymzZzdbe%j{fdcYZ{ddexfdeYZwdfewe(fdgYZ|nej}dh Z~e~dkr dje%jfdkYZer dle%jfdmYZq 
ndne&jfdoYZerFdpe&jfdqYZndre&jfdsYZdtZduePfdvYZdwefdxYZdyefdzYZd{e)fd|YZd}ePfd~YZdZdS(iN(tdeque(tiglobi(tDistlibException(t string_typest text_typetshutilt raw_inputtStringIOtcache_from_sourceturlopenturljointhttplibt xmlrpclibt splittypet HTTPHandlertBaseConfiguratort valid_identt Containert configparsertURLErrortZipFiletfsdecodetunquotes\s*,\s*s (\w|[.-])+s(\*|:(\*|\w+):|t)s\*?s([<>=!~]=)|[<>]t(s)?\s*(s)(s)\s*(s))*s(from\s+(?P.*))s \(\s*(?Pt|s)\s*\)|(?Ps\s*)s)*s \[\s*(?Ps)?\s*\]s(?Ps \s*)?(\s*s)?$s(?Ps )\s*(?Pc Cskd}d}tj|}|rg|j}|d}|dpK|d}|dsad}nd}|dj}|sd}d}|d} n{|ddkrd |}ntj|} g| D]}||^q}d |d jg|D]} d | ^qf} |d s$d} ntj |d } t d|d|d| d| d|d|}n|S(NcSs|j}|d|dfS(Ntoptvn(t groupdict(tmtd((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_constraintYs tdntc1tc2tdireftis<>!=s~=s%s (%s)s, s%s %stextnamet constraintstextrast requirementtsourceturl( tNonetREQUIREMENT_REtmatchRtstriptRELOP_IDENT_REtfinditertjointCOMMA_REtsplitR( tsRtresultRRR&tconsR+tconstrtrstiteratortconR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytparse_requirementWs4       0  cCsd}i}x|D]\}}}tjj||}xt|D]}tjj||} xt| D]v} ||| } |dkr|j| dqo||| } |jtjjdjd} | d| || RAtrstrip(tresources_roottrulesREt destinationsRDtsuffixtdesttprefixtabs_basetabs_globtabs_patht resource_filetrel_pathtrel_dest((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_resources_dests|s  !cCs:ttdrt}ntjttdtjk}|S(Nt real_prefixt base_prefix(thasattrtsystTrueRMtgetattr(R6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytin_venvs cCs7tjjtj}t|ts3t|}n|S(N(R?R@tnormcaseRXt executablet isinstanceRR(R6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_executables cCs|}xwtrt|}|}| r7|r7|}n|r |dj}||kr]Pn|r|d|||f}q|q q W|S(Nis %c: %s %s(RYRtlower(tpromptt allowed_charst error_prompttdefaulttpR5tc((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytproceeds     cCsVt|tr|j}ni}x+|D]#}||kr+||||R$cCstjj|}||jkrtjj| r|jj|tjj|\}}|j|tj d||j stj |n|j r|j 
j|qndS(Ns Creating %s(R?R@RRRRR4RRRRtmkdirRR(RR@RR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRs"   cCst|| }tjd|||js|sD|j||rf|sSd}qf|t|}ntj|||t n|j ||S(NsByte-compiling %s to %s( RRRRRR,RBt py_compiletcompileRYR(RR@toptimizetforceRMtdpathtdiagpath((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt byte_compiles   cCstjj|rtjj|rtjj| rtjd||js`tj |n|j r ||j kr|j j |qq qtjj|rd}nd}tjd|||jstj |n|j r||j kr |j j |q qndS(NsRemoving directory tree at %stlinktfilesRemoving %s %s(R?R@RtisdirRRtdebugRRRRRRR(RR@R5((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytensure_removeds"%     cCsjt}x]|setjj|r:tj|tj}Pntjj|}||kr\Pn|}q W|S(N(RR?R@RtaccesstW_OKR(RR@R6tparent((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt is_writables   cCs |j|jf}|j|S(sV Commit recorded changes, turn off recording, return changes. (RRR(RR6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytcommits cCs|jsx9t|jD](}tjj|rtj|qqWt|jdt }x\|D]Q}tj |}|rtjj ||d}tj |ntj |qaWn|j dS(Ntreversei(RtlistRR?R@RRtsortedRRYtlistdirR2trmdirR(RRtdirsRtflisttsd((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytrollbacks  N(RRRRRRRRYRR,RRRRtset_executable_modeRRRRRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRQs             cCs|tjkrtj|}n t|}|dkr@|}nG|jd}t||jd}x|D]}t||}qnW|S(Nt.i(RXtmodulest __import__R,R4RZRF(t module_namet dotted_pathtmodR6tpartsRe((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytresolves    t ExportEntrycBs;eZdZedZdZdZejZRS(cCs(||_||_||_||_dS(N(R&RMRKR(RR&RMRKR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRs   cCst|j|jS(N(RRMRK(R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRscCs d|j|j|j|jfS(Ns(R&RMRKR(R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt__repr__!scCsdt|tst}nH|j|jko]|j|jko]|j|jko]|j|jk}|S(N(R^RRR&RMRKR(RtotherR6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt__eq__%s ( RRRRRR R 
Rt__hash__(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRs    s(?P(\w|[-.+])+) \s*=\s*(?P(\w+)([:\.]\w+)*) \s*(\[\s*(?P\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? c CsStj|}|sId}d|ks3d|krOtd|qOn|j}|d}|d}|jd}|dkr|d}}n4|dkrtd|n|jd\}}|d } | dkrd|ksd|kr td|ng} n(g| jd D]} | j^q"} t|||| }|S( Nt[t]sInvalid specification '%s'R&tcallablet:iiRt,( tENTRY_REtsearchR,RRtcountR4R/R( t specificationRR6RR&R@tcolonsRMRKRR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRy7s2          (cCs|d krd}ntjdkrHdtjkrHtjjd}ntjjd}tjj|rtj|tj }|st j d|qnGytj |t }Wn-tk rt j d|dt t}nX|s tj}t j d |ntjj||S( s Return the default base location for distlib caches. If the directory does not exist, it is created. Use the suffix provided for the base directory, and default to '.distlib' if it isn't provided. On Windows, if LOCALAPPDATA is defined in the environment, then it is assumed to be a directory, and will be the parent directory of the result. On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home directory - using os.expanduser('~') - will be the parent directory of the result. The result is just the directory '.distlib' in the parent directory as determined above, or with the name specified with ``suffix``. s.distlibtntt LOCALAPPDATAs $localappdatat~s(Directory exists but is not writable: %ssUnable to create %stexc_infos#Default location unusable, using %sN(R,R?R&tenvironR@t expandvarst expanduserRRRRtwarningtmakedirsRYtOSErrorRRRR2(RKR6tusable((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_cache_baseVs&       cCs`tjjtjj|\}}|r?|jdd}n|jtjd}||dS(s Convert an absolute path to a directory name for use in a cache. The algorithm used is: #. On Windows, any ``':'`` in the drive is replaced with ``'---'``. #. Any occurrence of ``os.sep`` is replaced with ``'--'``. #. ``'.cache'`` is appended. 
Rs---s--s.cache(R?R@t splitdriveRR>RA(R@RRe((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytpath_to_cache_dirs $cCs|jds|dS|S(NR=(tendswith(R5((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt ensure_slashscCskd}}d|kr^|jdd\}}d|krC|}q^|jdd\}}n|||fS(Nt@iR(R,R4(tnetloctusernametpasswordRM((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytparse_credentialss    cCs tjd}tj||S(Ni(R?tumask(R6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_process_umasks cCsFt}d}x3t|D]%\}}t|tst}PqqW|S(N(RYR,t enumerateR^RR(tseqR6tiR5((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytis_string_sequencess3([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-([a-z0-9_.+-]+)s -py(\d\.?\d?)cCsd}d}t|jdd}tj|}|r[|jd}||j }n|rt|t|dkrtj tj |d|}|r|j }|| ||d|f}qn|dkrt j |}|r|jd|jd|f}qn|S(sw Extract name, version, python version from a filename (no extension) Return name, version, pyver or None t t-is\biN( R,RR>tPYTHON_VERSIONRRtstartRBtreR.tescapetendtPROJECT_NAME_AND_VERSION(tfilenamet project_nameR6tpyverRtn((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytsplit_filenames"" ! 's-(?P[\w .-]+)\s*\(\s*(?P[^\s)]+)\)$cCsRtj|}|s(td|n|j}|djj|dfS(s A utility method used to get name and version from a string. From e.g. a Provides-Dist value. :param p: A value in a form 'foo (1.0)' :return: The name and version as a tuple. 
s$Ill-formed name/version string: '%s'R&tver(tNAME_VERSION_RER.RRR/R`(ReRR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytparse_name_and_versions  cCs t}t|pg}t|p'g}d|krS|jd||O}nx|D]}|dkr||j|qZ|jdr|d}||krtjd|n||kr|j|qqZ||krtjd|n|j|qZW|S(Nt*R3isundeclared extra: %s(RRRt startswithRR(t requestedt availableR6trtunwanted((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt get_extrass&          cCsi}yqt|}|j}|jd}|jdsRtjd|n$tjd|}tj |}Wn&t k r}tj d||nX|S(Ns Content-Typesapplication/jsons(Unexpected response for JSON request: %ssutf-8s&Failed to get external data for %s: %s( R RtgetRCRRRsRtRvRwRzt exception(R+R6tresptheaderstcttreaderte((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt_get_external_datas  s'https://www.red-dove.com/pypi/projects/cCs9d|dj|f}tt|}t|}|S(Ns%s/%s/project.jsoni(tupperR t_external_data_base_urlRP(R&R+R6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_project_datas cCs6d|dj||f}tt|}t|S(Ns%s/%s/package-%s.jsoni(RQR RRRP(R&tversionR+((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_package_datastCachecBs)eZdZdZdZdZRS(s A class implementing a cache for resources that need to live in the file system e.g. shared libraries. This class was moved from resources to here because it could be used by other modules, e.g. the wheel module. cCsvtjj|s"tj|ntj|jd@dkrQtjd|ntjjtjj ||_ dS(su Initialise an instance. :param base: The base directory where the cache should be located. i?isDirectory '%s' is not privateN( R?R@RRRRRRRtnormpathRD(RRD((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR"s cCs t|S(sN Converts a resource prefix to a directory name in the cache. (R$(RRM((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt prefix_to_dir0scCsg}xtj|jD]}tjj|j|}yZtjj|s^tjj|rntj|n"tjj|rt j |nWqt k r|j |qXqW|S(s" Clear the cache. 
( R?RRDR@R2RRRRRRRztappend(Rt not_removedtfn((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytclear6s$ (RRt__doc__RRXR\(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRVs  t EventMixincBs>eZdZdZedZdZdZdZRS(s1 A very simple publish/subscribe system. cCs i|_dS(N(t _subscribers(R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRKscCs\|j}||kr+t|g|| %s;s %s;t}s (RkRYRmR2(RR6RtRvRsRn((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytdot s    ( RRRRoRRqRRRyRtpropertyRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRjs      3s.tar.gzs.tar.bz2s.tars.zips.tgzs.tbzs.whlc sfd}tjjtd}|dkr|jdrZd}q|jdrxd}d}q|jdrd }d }q|jd rd}d}qtd|nz|dkrt|d}|rZ|j}x|D]}||qWqZnBt j ||}|rZ|j }x|D]}||qCWn|dkrt j ddkrxA|jD]0} t| jts| jjd| _qqWn|jWd|r|jnXdS(Ncs|t|ts!|jd}ntjjtjj|}|j se|tjkrxt d|ndS(Nsutf-8spath outside destination: %r( R^RtdecodeR?R@RR2RCRAR(R@Re(tdest_dirtplen(s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt check_paths !#s.zips.whltzips.tar.gzs.tgzttgzsr:gzs.tar.bz2s.tbzttbzsr:bz2s.tarttarRFsUnknown format for %riisutf-8(s.zips.whl(s.tar.gzs.tgz(s.tar.bz2s.tbz(R?R@RRBR,R%RRtnamelistttarfileRtgetnamesRXRrt getmembersR^R&RRt extractallR~( tarchive_filenameRtformatRRtarchiveRtnamesR&ttarinfo((RRs</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt unarchivesH           c Cstj}t|}t|d}xutj|D]d\}}}xR|D]J}tjj||}||} tjj| |} |j|| qPWq:WWdQX|S(s*zip a directory tree into a BytesIO objectRN( tiotBytesIORBRR?twalkR@R2R( t directoryR6tdlentzftrootRRR&tfulltrelRL((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytzip_dirSs    R$tKtMtGtTtPtProgresscBseZdZdddZdZdZdZdZedZ ed Z d Z ed Z ed Z RS( tUNKNOWNiidcCs8||_|_||_d|_d|_t|_dS(Ni(RtcurtmaxR,tstartedtelapsedRtdone(Rtminvaltmaxval((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRjs    cCsD||_tj}|jdkr0||_n||j|_dS(N(RttimeRR,R(Rtcurvaltnow((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytupdaters    
cCs|j|j|dS(N(RR(Rtincr((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt increment|scCs|j|j|S(N(RR(R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR5scCs/|jdk r"|j|jnt|_dS(N(RR,RRYR(R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytstopscCs|jdkr|jS|jS(N(RR,tunknown(R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytmaximumscCsZ|jrd}nD|jdkr*d}n,d|j|j|j|j}d|}|S(Ns100 %s ?? %gY@s%3d %%(RRR,RR(RR6R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt percentages   " cCsU|dkr|jdks-|j|jkr6d}ntjdtj|}|S(Nis??:??:??s%H:%M:%S(RR,RRRtstrftimetgmtime(RtdurationR6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytformat_durations- cCs|jrd}|j}nd}|jdkr9d}ne|jdksZ|j|jkrcd}n;t|j|j}||j|j:}|d|j}d||j|fS(NtDonesETA iiis%s: %s(RRRR,RRtfloatR(RRMtt((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytETAs   ! cCsh|jdkrd}n|j|j|j}x(tD] }|dkrLPn|d:}q6Wd||fS(Nigig@@s%d %sB/s(RRRtUNITS(RR6tunit((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytspeeds   (RRRRRRR5RRRRRRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRgs    s \{([^}]*)\}s[^/\\,{]\*\*|\*\*[^/\\,}]s^[^{]*\}|\{[^}]*$cCsZtj|r(d}t||ntj|rPd}t||nt|S(sAExtended globbing function that supports ** and {opt1,opt2,opt3}.s7invalid glob %r: recursive glob "**" must be used alones2invalid glob %r: mismatching set marker '{' or '}'(t_CHECK_RECURSIVE_GLOBRRt_CHECK_MISMATCH_SETt_iglob(t path_globR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRsc csmtj|d}t|dkr~|\}}}x3|jdD]4}x+tdj|||fD] }|VqhWqCWnd|krxt|D] }|VqWn|jdd\}}|dkrd}n|dkrd}n|jd}|jd}x]tj|D]L\}}} tj j |}x(ttj j||D] } | VqVWqWdS( NiRR$s**RRBR=s\( t RICH_GLOBR4RBRR2t std_iglobRCR?RR@RW( Rtrich_path_globRMRRKtitemR@tradicaltdirRR[((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRs(%      "(t HTTPSHandlertmatch_hostnametCertificateErrortHTTPSConnectioncBseZdZeZdZRS(c Cstj|j|jf|j}t|dtrI||_|jnt t ds|j rmt j }n t j }t j||j|jd|dt jd|j 
|_nt jt j}|jt jO_|jr|j|j|jni}|j rHt j |_|jd|j tt dtrH|j|d!           N(RRR,RRYRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRsRcBs&eZedZdZdZRS(cCs#tj|||_||_dS(N(tBaseHTTPSHandlerRRR(RRR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR#s  cOs7t||}|jr3|j|_|j|_n|S(s This is called to create a connection instance. Normally you'd pass a connection class to do_open, but it doesn't actually check for a class, and just expects a callable. As long as we behave just as a constructor would have, we should be OK. If it ever changes so that we *must* pass a class, we'll create an UnsafeHTTPSConnection class which just sets check_domain to False in the class definition, and choose which one to pass to do_open. (RRR(RRgRhR6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt _conn_maker(s   cCs_y|j|j|SWnAtk rZ}dt|jkrTtd|jq[nXdS(Nscertificate verify faileds*Unable to verify server certificate for %s(tdo_openRRtstrtreasonRR(RtreqRO((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt https_open8s(RRRYRRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR"s  tHTTPSOnlyHandlercBseZdZRS(cCstd|dS(NsAUnexpected HTTP request on what should be a secure connection: %s(R(RR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt http_openLs(RRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRKsiitHTTPcBseZdddZRS(R$cKs5|dkrd}n|j|j|||dS(Ni(R,t_setupt_connection_class(RRRRh((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRXs  N(RRR,R(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRWstHTTPScBseZdddZRS(R$cKs5|dkrd}n|j|j|||dS(Ni(R,RR (RRRRh((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR`s  N(RRR,R(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR _st TransportcBseZddZdZRS(icCs ||_tjj||dS(N(RR R R(RRt use_datetime((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRgs cCs|j|\}}}tdkr<t|d|j}nN|j 
sY||jdkr}||_|tj|f|_n|jd}|S(NiiRii(ii(t get_host_infot _ver_infoRRt _connectiont_extra_headersR tHTTPConnection(RRthtehtx509R6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytmake_connectionks   (RRRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR fs t SafeTransportcBseZddZdZRS(icCs ||_tjj||dS(N(RR RR(RRR ((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRxs cCs|j|\}}}|s'i}n|j|dR3(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR2s     tSubprocessMixincBs)eZeddZdZdZRS(cCs||_||_dS(N(tverbosetprogress(RRBRC((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRs cCs|j}|j}x{tr|j}|s1Pn|dk rM|||q|sftjjdntjj|jdtjj qW|j dS(s Read lines from a subprocess' output stream and either pass to a progress callable (if specified) or write progress information to sys.stderr. Rsutf-8N( RCRBRYtreadlineR,RXtstderrRRtflushR~(RRpRRCRBR5((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRN"s     cKstj|dtjdtj|}tjd|jd|jdf}|jtjd|jd|jdf}|j|j |j |j |j dk r|j ddn|j rtjjdn|S(NtstdoutRERRgsdone.tmainsdone. 
(t subprocesstPopentPIPEt threadingtThreadRNRGR5REtwaitR2RCR,RBRXR(RtcmdRhRett1tt2((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt run_command7s$ $     N(RRRR,RRNRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRAs cCstjdd|jS(s,Normalize a python package name a la PEP 503s[-_.]+R3(R6tsubR`(R&((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytnormalize_nameHs(s.tar.gzs.tar.bz2s.tars.zips.tgzs.tbzs.whl(R$RRRRR(ii(Rst collectionsRt contextlibR*tglobRRRRvtloggingR?RR6RRRt ImportErrorR,RIRXRRRRLtdummy_threadingRR$RtcompatRRRRRR R R R R RRRRRRRRRt getLoggerRRtCOMMARR3tIDENTt EXTRA_IDENTtVERSPECtRELOPtBARE_CONSTRAINTSt DIRECT_REFt CONSTRAINTSt EXTRA_LISTtEXTRASt REQUIREMENTR-t RELOP_IDENTR0R<RTR[R_RgRjRRtcontextmanagerRRRRRRRRRtVERBOSERRyR"R$R&R+R-R1tIR9R4R>R@RARHRPRRRSRURVR^RjtARCHIVE_EXTENSIONSRYRRRRRRRRRRRRRRRrRRR R RRR R!R)R.R2RART(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyts                      . %   /       )           ,H6 ] *)   :+PK.e[99distlib/manifest.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2012-2013 Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """ Class representing the list of files in a distribution. Equivalent to distutils.filelist, but fixes some problems. """ import fnmatch import logging import os import re import sys from . import DistlibException from .compat import fsdecode from .util import convert_path __all__ = ['Manifest'] logger = logging.getLogger(__name__) # a \ followed by some spaces + EOL _COLLAPSE_PATTERN = re.compile('\\\w*\n', re.M) _COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S) # # Due to the different results returned by fnmatch.translate, we need # to do slightly different processing for Python 2.7 and 3.2 ... this needed # to be brought in for Python 3.6 onwards. 
# _PYTHON_VERSION = sys.version_info[:2] class Manifest(object): """A list of files built by on exploring the filesystem and filtered by applying various patterns to what we find there. """ def __init__(self, base=None): """ Initialise an instance. :param base: The base directory to explore under. """ self.base = os.path.abspath(os.path.normpath(base or os.getcwd())) self.prefix = self.base + os.sep self.allfiles = None self.files = set() # # Public API # def findall(self): """Find all files under the base and set ``allfiles`` to the absolute pathnames of files found. """ from stat import S_ISREG, S_ISDIR, S_ISLNK self.allfiles = allfiles = [] root = self.base stack = [root] pop = stack.pop push = stack.append while stack: root = pop() names = os.listdir(root) for name in names: fullname = os.path.join(root, name) # Avoid excess stat calls -- just one will do, thank you! stat = os.stat(fullname) mode = stat.st_mode if S_ISREG(mode): allfiles.append(fsdecode(fullname)) elif S_ISDIR(mode) and not S_ISLNK(mode): push(fullname) def add(self, item): """ Add a file to the manifest. :param item: The pathname to add. This can be relative to the base. """ if not item.startswith(self.prefix): item = os.path.join(self.base, item) self.files.add(os.path.normpath(item)) def add_many(self, items): """ Add a list of files to the manifest. :param items: The pathnames to add. These can be relative to the base. """ for item in items: self.add(item) def sorted(self, wantdirs=False): """ Return sorted files in directory order """ def add_dir(dirs, d): dirs.add(d) logger.debug('add_dir added %s', d) if d != self.base: parent, _ = os.path.split(d) assert parent not in ('', '/') add_dir(dirs, parent) result = set(self.files) # make a copy! 
if wantdirs: dirs = set() for f in result: add_dir(dirs, os.path.dirname(f)) result |= dirs return [os.path.join(*path_tuple) for path_tuple in sorted(os.path.split(path) for path in result)] def clear(self): """Clear all collected files.""" self.files = set() self.allfiles = [] def process_directive(self, directive): """ Process a directive which either adds some files from ``allfiles`` to ``files``, or removes some files from ``files``. :param directive: The directive to process. This should be in a format compatible with distutils ``MANIFEST.in`` files: http://docs.python.org/distutils/sourcedist.html#commands """ # Parse the line: split it up, make sure the right number of words # is there, and return the relevant words. 'action' is always # defined: it's the first word of the line. Which of the other # three are defined depends on the action; it'll be either # patterns, (dir and patterns), or (dirpattern). action, patterns, thedir, dirpattern = self._parse_directive(directive) # OK, now we know that the action is valid and we have the # right number of words on the line for that action -- so we # can proceed with minimal error-checking. 
if action == 'include': for pattern in patterns: if not self._include_pattern(pattern, anchor=True): logger.warning('no files found matching %r', pattern) elif action == 'exclude': for pattern in patterns: found = self._exclude_pattern(pattern, anchor=True) #if not found: # logger.warning('no previously-included files ' # 'found matching %r', pattern) elif action == 'global-include': for pattern in patterns: if not self._include_pattern(pattern, anchor=False): logger.warning('no files found matching %r ' 'anywhere in distribution', pattern) elif action == 'global-exclude': for pattern in patterns: found = self._exclude_pattern(pattern, anchor=False) #if not found: # logger.warning('no previously-included files ' # 'matching %r found anywhere in ' # 'distribution', pattern) elif action == 'recursive-include': for pattern in patterns: if not self._include_pattern(pattern, prefix=thedir): logger.warning('no files found matching %r ' 'under directory %r', pattern, thedir) elif action == 'recursive-exclude': for pattern in patterns: found = self._exclude_pattern(pattern, prefix=thedir) #if not found: # logger.warning('no previously-included files ' # 'matching %r found under directory %r', # pattern, thedir) elif action == 'graft': if not self._include_pattern(None, prefix=dirpattern): logger.warning('no directories found matching %r', dirpattern) elif action == 'prune': if not self._exclude_pattern(None, prefix=dirpattern): logger.warning('no previously-included directories found ' 'matching %r', dirpattern) else: # pragma: no cover # This should never happen, as it should be caught in # _parse_template_line raise DistlibException( 'invalid action %r' % action) # # Private API # def _parse_directive(self, directive): """ Validate a directive. :param directive: The directive to validate. 
:return: A tuple of action, patterns, thedir, dir_patterns """ words = directive.split() if len(words) == 1 and words[0] not in ('include', 'exclude', 'global-include', 'global-exclude', 'recursive-include', 'recursive-exclude', 'graft', 'prune'): # no action given, let's use the default 'include' words.insert(0, 'include') action = words[0] patterns = thedir = dir_pattern = None if action in ('include', 'exclude', 'global-include', 'global-exclude'): if len(words) < 2: raise DistlibException( '%r expects ...' % action) patterns = [convert_path(word) for word in words[1:]] elif action in ('recursive-include', 'recursive-exclude'): if len(words) < 3: raise DistlibException( '%r expects ...' % action) thedir = convert_path(words[1]) patterns = [convert_path(word) for word in words[2:]] elif action in ('graft', 'prune'): if len(words) != 2: raise DistlibException( '%r expects a single ' % action) dir_pattern = convert_path(words[1]) else: raise DistlibException('unknown action %r' % action) return action, patterns, thedir, dir_pattern def _include_pattern(self, pattern, anchor=True, prefix=None, is_regex=False): """Select strings (presumably filenames) from 'self.files' that match 'pattern', a Unix-style wildcard (glob) pattern. Patterns are not quite the same as implemented by the 'fnmatch' module: '*' and '?' match non-special characters, where "special" is platform-dependent: slash on Unix; colon, slash, and backslash on DOS/Windows; and colon on Mac OS. If 'anchor' is true (the default), then the pattern match is more stringent: "*.py" will match "foo.py" but not "foo/bar.py". If 'anchor' is false, both of these will match. If 'prefix' is supplied, then only filenames starting with 'prefix' (itself a pattern) and ending with 'pattern', with anything in between them, will match. 'anchor' is ignored in this case. 
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and 'pattern' is assumed to be either a string containing a regex or a regex object -- no translation is done, the regex is just compiled and used as-is. Selected strings will be added to self.files. Return True if files are found. """ # XXX docstring lying about what the special chars are? found = False pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) # delayed loading of allfiles list if self.allfiles is None: self.findall() for name in self.allfiles: if pattern_re.search(name): self.files.add(name) found = True return found def _exclude_pattern(self, pattern, anchor=True, prefix=None, is_regex=False): """Remove strings (presumably filenames) from 'files' that match 'pattern'. Other parameters are the same as for 'include_pattern()', above. The list 'self.files' is modified in place. Return True if files are found. This API is public to allow e.g. exclusion of SCM subdirs, e.g. when packaging source distributions """ found = False pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) for f in list(self.files): if pattern_re.search(f): self.files.remove(f) found = True return found def _translate_pattern(self, pattern, anchor=True, prefix=None, is_regex=False): """Translate a shell-like wildcard pattern to a compiled regular expression. Return the compiled regex. If 'is_regex' true, then 'pattern' is directly compiled to a regex (if it's a string) or just returned as-is (assumes it's a regex object). 
""" if is_regex: if isinstance(pattern, str): return re.compile(pattern) else: return pattern if _PYTHON_VERSION > (3, 2): # ditch start and end characters start, _, end = self._glob_to_re('_').partition('_') if pattern: pattern_re = self._glob_to_re(pattern) if _PYTHON_VERSION > (3, 2): assert pattern_re.startswith(start) and pattern_re.endswith(end) else: pattern_re = '' base = re.escape(os.path.join(self.base, '')) if prefix is not None: # ditch end of pattern character if _PYTHON_VERSION <= (3, 2): empty_pattern = self._glob_to_re('') prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)] else: prefix_re = self._glob_to_re(prefix) assert prefix_re.startswith(start) and prefix_re.endswith(end) prefix_re = prefix_re[len(start): len(prefix_re) - len(end)] sep = os.sep if os.sep == '\\': sep = r'\\' if _PYTHON_VERSION <= (3, 2): pattern_re = '^' + base + sep.join((prefix_re, '.*' + pattern_re)) else: pattern_re = pattern_re[len(start): len(pattern_re) - len(end)] pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep, pattern_re, end) else: # no prefix -- respect anchor flag if anchor: if _PYTHON_VERSION <= (3, 2): pattern_re = '^' + base + pattern_re else: pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):]) return re.compile(pattern_re) def _glob_to_re(self, pattern): """Translate a shell-like glob pattern to a regular expression. Return a string containing the regex. Differs from 'fnmatch.translate()' in that '*' does not match "special characters" (which are platform-specific). """ pattern_re = fnmatch.translate(pattern) # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix, # and by extension they shouldn't match such "special characters" under # any OS. So change all non-escaped dots in the RE to match any # character except the special characters (currently: just os.sep). 
sep = os.sep if os.sep == '\\': # we're using a regex to manipulate a regex, so we need # to escape the backslash twice sep = r'\\\\' escaped = r'\1[^%s]' % sep pattern_re = re.sub(r'((?ddlmZdZdZdZdZdZdZddlZddl Z ddl Z ddl Z ddl Z ddl Z ddlZddlZyddlZddlZWnek reZZnXeefZyeef7ZWnek rnXd d d d gZejd dkr3ddlZn ddlZejZdZdZ e dZ!dZ"dZ#dZ$dZ%dZ&dZ'dZ(dZ)dZ*dZ+dZ,dZ-dZ.dZ/dZ0dZ1d Z2d!Z3d"Z4d#Z5d Z6d$Z7d%Z8e7Z9e'e(e)e*e-e.e/e+e,e0e1e2f Z:e'e(e/e2fZ;e0e1e2fZ<d&d'd(d)d*d+d,d-fZ=e>d&d'd,d-fZ?ie@d.6e@d/6e@d)6eAd*6eAd+6eAd(6ZBd0ZCd1ZDd2ZEd3ZFd4ZGd5ZHd6ZId7ZJdZKd8ZLd9ZMd:ZNd;ZOd<ZPd=ZQd>ZRd%ZSd$ZTe jUd?d@fkr)dAZVn ejWZVdBZXdCZYdDZZd=e9dEZ[dFZ\edGZ]eCdHfeDdIfeEdJfeFdKfeGdLfeHdMffeLdNffeMdOffeNeIBdPfeId feNd!ffeOdNffePdOffeQeJBdPfeJd feQd!ffeRdNffeSdOffeTeKBdQfeKdRfeTd!fff Z^dSZ_d e`fdTYZadUeafdVYZbdWeafdXYZcdYeafdZYZdd[eafd\YZed]eafd^YZfd_effd`YZgdaeffdbYZhdceffddYZideeffdfYZjdgeffdhYZkdielfdjYZmdkelfdlYZndmelfdnYZodoelfdpYZpdqelfdrYZqdselfdtYZrd elfduYZsd elfdvYZtdwelfdxYZudyZveZwetjZdS(zi(tprint_functions $Revision$s0.9.0s&Lars Gust\u00e4bel (lars@gustaebel.de)s5$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $s?$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $s8Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend.NtTarFiletTarInfot is_tarfiletTarErroriisiisustar sustar00idit0t1t2t3t4t5t6t7tLtKtStxtgtXiitpathtlinkpathtsizetmtimetuidtgidtunametgnametatimetctimeiii`i@i iiiiii@i iiitnttcesutf-8cCs,|j||}|| |t|tS(s8Convert a string to a null-terminated bytes object. (tencodetlentNUL(tstlengthtencodingterrors((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytstnscCs8|jd}|dkr(|| }n|j||S(s8Convert a null-terminated bytes object to a string. si(tfindtdecode(R"R$R%tp((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytntss  cCs|dtdkr^y%tt|ddp1dd}Wqtk rZtdqXnId}x@tt|dD](}|dK}|t||d7}q{W|S( s/Convert a number field to a python number. 
iitasciitstrictRisinvalid headeri(tchrtintR*t ValueErrortInvalidHeaderErrortrangeR tord(R"tnti((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytntis%  cCsd|kod|dknrHd|d|fjdt}n|tksh|d|dkrwtdn|dkrtjdtjd |d}nt}x6t|dD]$}|j d|d @|dL}qW|j dd |S( s/Convert a python number to a number field. iiis%0*oR+isoverflow in number fieldR tlii( RR!t GNU_FORMATR/tstructtunpacktpackt bytearrayR1tinsert(R3tdigitstformatR"R4((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytitns $$  % cCsxdttjd|d tjd|dd!}dttjd|d tjd|dd!}||fS( sCalculate the checksum for a member's header by summing up all characters except for the chksum field which is treated as if it was filled with spaces. According to the GNU tar sources, some tars (Sun and NeXT) calculate chksum with signed char, which will be different if there are chars in the buffer with the high bit set. So we calculate two checksums, unsigned and signed. it148Bit356Biit148bt356b(tsumR8R9(tbuftunsigned_chksumt signed_chksum((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt calc_chksumss 77cCs|dkrdS|dkrSx0trN|jd}|s>Pn|j|qWdSd}t||\}}xQt|D]C}|j|}t||krtdn|j|q{W|dkr|j|}t||krtdn|j|ndS(sjCopy length bytes from fileobj src to fileobj dst. If length is None, copy the entire content. iNiisend of file reachedi@i@(tNonetTruetreadtwritetdivmodR1R tIOError(tsrctdstR#REtBUFSIZEtblockst remaindertb((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt copyfileobjs,    R6t-RTtdtcR)trtwR"tttTcCsig}xStD]K}xB|D]-\}}||@|kr|j|PqqW|jdq Wdj|S(scConvert a file's mode to a string of the form -rwxrwxrwx. 
Used by TarFile.list() RVt(tfilemode_tabletappendtjoin(tmodetpermttabletbittchar((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytfilemode8s  cBseZdZRS(sBase exception.(t__name__t __module__t__doc__(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRGst ExtractErrorcBseZdZRS(s%General exception for extract errors.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRjJst ReadErrorcBseZdZRS(s&Exception for unreadable tar archives.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRkMstCompressionErrorcBseZdZRS(s.Exception for unavailable compression methods.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRlPst StreamErrorcBseZdZRS(s=Exception for unsupported operations on stream-like TarFiles.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRmSst HeaderErrorcBseZdZRS(s!Base exception for header errors.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRnVstEmptyHeaderErrorcBseZdZRS(sException for empty headers.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRoYstTruncatedHeaderErrorcBseZdZRS(s Exception for truncated headers.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRp\stEOFHeaderErrorcBseZdZRS(s"Exception for end of file headers.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRq_sR0cBseZdZRS(sException for invalid headers.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR0bstSubsequentHeaderErrorcBseZdZRS(s3Exception for missing and invalid extended headers.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRrest _LowLevelFilecBs2eZdZdZdZdZdZRS(sLow-level file object. Supports reading and writing. 
It is used instead of a regular file object for streaming access. cCsgitjd6tjtjBtjBd6|}ttdrK|tjO}ntj||d|_dS(NRYRZtO_BINARYi( tostO_RDONLYtO_WRONLYtO_CREATtO_TRUNCthasattrRttopentfd(tselftnameRa((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt__init__rs cCstj|jdS(N(RutcloseR|(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR{scCstj|j|S(N(RuRKR|(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRK~scCstj|j|dS(N(RuRLR|(R}R"((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRLs(RgRhRiRRRKRL(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRsls   t_StreamcBseZdZdZdZdZdZdZdZdZ dZ d d Z dd Z d Zd ZRS(sClass that serves as an adapter between TarFile and a stream-like object. The stream-like object only needs to have a read() or write() method and is accessed blockwise. Use of gzip or bzip2 compression is possible. A stream-like object could be for example: sys.stdin, sys.stdout, a socket, a tape device etc. _Stream is intended to be used only internally. cCst|_|dkr0t||}t|_n|dkrWt|}|j}n|p`d|_||_||_ ||_ ||_ d|_ d|_ t|_y|dkr%yddl}Wntk rtdnX||_|jd|_|dkr|jq%|jn|d kryddl}Wntk r`td nX|dkrd|_|j|_q|j|_nWn,|js|j jnt|_nXdS( s$Construct a _Stream object. t*R]itgziNszlib module is not availableRYtbz2sbz2 module is not available(RJt _extfileobjRIRstFalset _StreamProxyt getcomptypeR~RatcomptypetfileobjtbufsizeREtpostclosedtzlibt ImportErrorRltcrc32tcrct _init_read_gzt_init_write_gzRtdbuftBZ2Decompressortcmpt BZ2CompressorR(R}R~RaRRRRR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRsP                        cCs*t|dr&|j r&|jndS(NR(RzRR(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt__del__scCs|jjd|jj|jj |jjd|_tjdtt j }|j d|d|j j dr|j d |_ n|j |j j dd td S( s6Initialize for writing with gzip compression. 
i isZ2RS(@sInformational class which holds the details about an archive member given by a tar header block. TarInfo objects are returned by TarFile.getmember(), TarFile.getmembers() and TarFile.gettarinfo() and are usually created internally. R~RaRRRRtchksumttypetlinknameRRtdevmajortdevminorRRt pax_headersRRt_sparse_structst _link_targetR]cCs||_d|_d|_d|_d|_d|_d|_t|_d|_ d|_ d|_ d|_ d|_ d|_d|_d|_i|_dS(sXConstruct a TarInfo object. name is the optional name of the member. iiR]N(R~RaRRRRRtREGTYPERRRRRRRRRIRR(R}R~((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRs"                cCs|jS(N(R~(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt_getpathscCs ||_dS(N(R~(R}R~((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt_setpathscCs|jS(N(R(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt _getlinkpathscCs ||_dS(N(R(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt _setlinkpathscCs d|jj|jt|fS(Ns<%s %r at %#x>(t __class__RgR~tid(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt__repr__scCsi |jd6|jd@d6|jd6|jd6|jd6|jd6|jd6|jd 6|jd 6|j d 6|j d 6|j d 6|j d6}|d t kr|djd r|dcd7R$R%R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyttobufs    cCst|dny||jd d Wn"tk r||||nXt|||kr>||||q>WxddddfD]\}}||krd||R$R%tpartsRER((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRYs&$#cCs@tt|t\}}|dkr<|t|t7}n|S(sdReturn the string payload filled with zero bytes up to the next 512 byte border. i(RMR RR!(tpayloadRRRS((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt_create_payloadus cCsm|j||t}i}d|d<||d|j|S|jtttfkrc|j |S|j |SdS(sYChoose the right processing method depending on the type and call it. 
N( RRRt _proc_gnulongRt _proc_sparseRRtSOLARIS_XHDTYPEt _proc_paxt _proc_builtin(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR%s   cCsx|jj|_|j}|js6|jtkrO||j|j7}n||_|j |j |j |j |S(sfProcess a builtin type or an unknown type which will be treated as a regular file. ( RRRtisregRtSUPPORTED_TYPESt_blockRRt_apply_pax_infoRR$R%(R}RR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR+$s  cCs|jj|j|j}y|j|}Wntk rPtdnX|j|_|jt krt ||j |j |_ n-|jtkrt ||j |j |_n|S(sSProcess the blocks that hold a GNU longname or longlink member. s missing or bad subsequent header(RRKR.RR&RnRrRRRR*R$R%R~RR(R}RREtnext((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR'5s  c Cs|j\}}}|`x|r|jjt}d}xtdD]}}y6t|||d!}t||d|d!} Wntk rPnX|r| r|j|| fn|d7}qFWt|d}qW||_ |jj |_ |j |j |j |_||_ |S(s8Process a GNU sparse header plus extra headers. iii ii(RRRKRR1R5R/R_RRRRR.RR( R}RR R"R#RERR4RR!((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR(Ks(     cCs|jj|j|j}|jtkr9|j}n|jj}tj d|}|dk r|j dj d|ds  cCsx|jD]\}}|dkr8t|d|q |dkr]t|dt|q |dkrt|dt|q |tkr |tkryt||}Wqtk rd}qXn|dkr|jd}nt|||q q W|j|_dS( soReplace fields with supplemental information from a previous pax extended or global header. sGNU.sparse.nameRsGNU.sparse.sizeRsGNU.sparse.realsizeiRN( RtsetattrR.t PAX_FIELDStPAX_NUMBER_FIELDSR/RRR(R}RR$R%RR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR/s"        cCs9y|j|dSWntk r4|j||SXdS(s1Decode a single field from a pax record. R,N(R(tUnicodeDecodeError(R}RR$tfallback_encodingtfallback_errors((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR:s cCs0t|t\}}|r(|d7}n|tS(s_Round up a byte count by BLOCKSIZE and return it, e.g. _block(834) => 1024. i(RMR(R}RRRRS((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR. 
s cCs |jtkS(N(Rt REGULAR_TYPES(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR,scCs |jS(N(R,(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytisfilescCs |jtkS(N(RR(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRscCs |jtkS(N(RtSYMTYPE(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytissymscCs |jtkS(N(RtLNKTYPE(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytislnkscCs |jtkS(N(RtCHRTYPE(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytischr scCs |jtkS(N(RtBLKTYPE(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytisblk"scCs |jtkS(N(RtFIFOTYPE(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytisfifo$scCs |jdk S(N(RRI(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytissparse&scCs|jtttfkS(N(RRTRVRX(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytisdev(s(R~RaRRRRRRRRRRRRRRRRRR(3RgRhRit __slots__RRRtpropertyRRRRRRtDEFAULT_FORMATtENCODINGRRRRt classmethodR Rt staticmethodRRRRR$R&R%R+R'R(R*R=R<R>R/R:R.R,RORRQRSRURWRYRZR[(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRs`         1  3?    f             c Bs-eZdZdZeZeZdZeZ e Z d1Z eZeZd1dd1d1d1d1d1d1dd1d1d1d Zed1dd1edZedd1dZedd1dd Zedd1dd Zid d 6d d6dd6ZdZdZdZdZd1d1d1dZedZ d1ed1d1dZ!d1dZ"dd1dZ#dedZ$dZ%edZ&dZ'd Z(d!Z)d"Z*d#Z+d$Z,d%Z-d&Z.d'Z/d(Z0d1ed)Z1d*Z2d1d+Z3d,Z4d-Z5d.Z6d/Z7d0Z8RS(2s=The TarFile Class provides an interface to tar archives. 
iiRYRc Cst|dks|dkr-tdn||_idd6dd6dd 6||_|s|jdkrtjj| rd |_d|_nt||j}t|_ nN|d krt |d r|j }nt |d r|j|_nt |_ |rtjj|nd |_ ||_|d k rC||_n|d k r[||_n|d k rs||_n|d k r||_n|d k r||_n| |_| d k r|jtkr| |_n i|_| d k r| |_n| d k r | |_nt|_g|_t|_|jj|_i|_y9|jdkrod |_ |j!|_ n|jdkrxt r|jj"|jy&|jj#|}|jj$|Wqt%k r|jj"|jPqt&k r } t't(| qXqWn|jd krzt |_|jrz|jj)|jj*}|jj+||jt|7_qznWn,|j s|jj,nt |_nXd S(sOpen an (uncompressed) tar archive `name'. `mode' is either 'r' to read from an existing archive, 'a' to append data to an existing file or 'w' to create a new file overwriting an existing one. `mode' defaults to 'r'. If `fileobj' is given, it is used for reading or writing data. If it can be determined, `mode' is overridden by `fileobj's mode. `fileobj' is not closed, when TarFile is closed. iRsmode must be 'r', 'a' or 'w'trbRYsr+btatwbRZR~RatawN(-R R/Rat_modeRuRtexistst bltn_openRRRIRzR~RJtabspathRR>Rt dereferencet ignore_zerosR$R%RRtdebugt errorlevelRtmemberst_loadedRRtinodest firstmemberR0RR&R_RqRnRkRR RRLR(R}R~RaRR>RRjRkR$R%RRlRmteRE((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRFs  ""     !                             c Ks4| r| rtdn|dkrx|jD]}t||j|}|dk rj|j}ny||d||SWq3ttfk r} |dk r3|j|q3q3q3Xq3WtdnUd|krV|jdd\} }| pd} |pd}||jkr3t||j|}ntd|||| ||Sd |kr|jd d\} }| pd} |pd}| d krtd nt || |||} y||| | |} Wn| j nXt | _ | S|d kr$|j ||||Std dS(s|Open a tar archive for reading, writing or appending. Return an appropriate TarFile class. 
mode: 'r' or 'r:*' open for reading with transparent compression 'r:' open for reading exclusively uncompressed 'r:gz' open for reading with gzip compression 'r:bz2' open for reading with bzip2 compression 'a' or 'a:' open for appending, creating the file if necessary 'w' or 'w:' open for writing without compression 'w:gz' open for writing with gzip compression 'w:bz2' open for writing with bzip2 compression 'r|*' open a stream of tar blocks with transparent compression 'r|' open an uncompressed stream of tar blocks for reading 'r|gz' open a gzip compressed stream of tar blocks 'r|bz2' open a bzip2 compressed stream of tar blocks 'w|' open an uncompressed stream for writing 'w|gz' open a gzip compressed stream for writing 'w|bz2' open a bzip2 compressed stream for writing snothing to openRYsr:*s%file could not be opened successfullyt:iRsunknown compression type %rt|trwsmode must be 'r' or 'w'Resundiscernible modeN(RYsr:*(R/t OPEN_METHRRIRRkRlRRERRRRttaropen( R R~RaRRtkwargsRtfunct saved_posRrRftstreamR[((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR{sN              cKs@t|dks|dkr-tdn|||||S(sCOpen uncompressed tar archive name for reading or writing. iRsmode must be 'r', 'a' or 'w'(R R/(R R~RaRRx((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRwsi c Ks6t|dks|dkr-tdnyddl}|jWn#ttfk ritdnX|dk }y8|j||d||}|j||||}Wnxt k r| r|dk r|j n|dkrnt dn*| r"|dk r"|j nnX||_ |S( skOpen gzip compressed tar archive name for reading or writing. Appending is not allowed. iRusmode must be 'r' or 'w'iNsgzip module is not availableRTsnot a gzip file( R R/tgziptGzipFileRtAttributeErrorRlRIRwRNRRkR( R R~RaRt compresslevelRxR|t extfileobjR[((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytgzopens.        cKst|dks|dkr-tdnyddl}Wntk r\tdnX|dk r{t||}n|j||d|}y|j||||}Wn-t t fk r|j t dnXt |_|S( slOpen bzip2 compressed tar archive name for reading or writing. Appending is not allowed. 
iRusmode must be 'r' or 'w'.iNsbz2 module is not availableRsnot a bzip2 file(R R/RRRlRIRtBZ2FileRwRNtEOFErrorRRkRR(R R~RaRRRxRR[((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytbz2open$s     RwRRRRRcCs|jr dS|jdkr|jjttd|jtd7_t|jt\}}|dkr|jjtt|qn|j s|jj nt |_dS(slClose the TarFile. In write-mode, two finishing zero blocks are appended to the archive. NReii( RRaRRLR!RRRMt RECORDSIZERRRJ(R}RRRS((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRHs   cCs2|j|}|dkr.td|n|S(sReturn a TarInfo object for member `name'. If `name' can not be found in the archive, KeyError is raised. If a member occurs more than once in the archive, its last occurrence is assumed to be the most up-to-date version. sfilename %r not foundN(t _getmemberRItKeyError(R}R~R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt getmember\s cCs'|j|js |jn|jS(sReturn the members of the archive as a list of TarInfo objects. The list has the same order as the members in the archive. (t_checkRot_loadRn(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt getmembersgs   cCs g|jD]}|j^q S(sReturn the members of the archive as a list of their names. It has the same order as the list returned by getmembers(). (RR~(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytgetnamesqsc Cs\|jd|d k r%|j}n|d kr:|}ntjj|\}}|jtjd}|jd}|j }||_ |d krt tdr|j rtj |}qtj|}ntj|j}d}|j}tj|r|j|jf} |j rj|jdkrj| |jkrj||j| krjt} |j| }qt} | dr||j| slink toN(RtprintRfRaRRRRRURWRRRRt localtimeRR~RRQRRS(R}tverboseR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRAs&   !)  c Cs|jd|dkr"|}n|dk rtddl}|jdtd||rt|jdd|dSn|jdk rtjj ||jkr|jdd|dS|jd||j ||}|dkr|jdd |dS|dk r;||}|dkr;|jdd|dSn|j rst |d }|j |||jn|jr|j ||rxTtj|D]@}|jtjj||tjj||||d |qWqn |j |dS( s~Add the file `name' to the archive. `name' may be any type of file (directory, fifo, symbolic link, etc.). 
If given, `arcname' specifies an alternative name for the file in the archive. Directories are added recursively by default. This can be avoided by setting `recursive' to False. `exclude' is a function that should return True for each filename to be excluded. `filter' is a function that expects a TarInfo object argument and returns the changed TarInfo object, if it returns None the TarInfo object will be excluded from the archive. ReiNsuse the filter argument insteadistarfile: Excluded %rstarfile: Skipped %ristarfile: Unsupported type %rRbtfilter(RRItwarningstwarntDeprecationWarningt_dbgR~RuRRiRR,RhtaddfileRRtlistdirtaddR`( R}R~Rt recursivetexcludeRRRtf((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRsD        *        *cCs|jdtj|}|j|j|j|j}|jj||jt |7_|dk rt ||j|j t |j t\}}|dkr|jjtt||d7}n|j|t7_n|jj|dS(s]Add the TarInfo object `tarinfo' to the archive. If `fileobj' is given, tarinfo.size bytes are read from it and added to the archive. You can create TarInfo objects using gettarinfo(). On Windows platforms, `fileobj' should always be opened with mode 'rb' to avoid irritation about the file size. ReiiN(RRRR>R$R%RRLRR RIRURRMRR!RnR_(R}RRRERRRS((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR4s    t.cCs:g}|dkr|}nx_|D]W}|jr\|j|tj|}d|_n|j||d|j q"W|jdd|jx|D]}tj j ||j }y4|j |||j |||j||Wqtk r1}|jdkrq2|jdd|qXqWdS(sMExtract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers(). it set_attrstkeycSs|jS(N(R~(Rc((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytdR]is tarfile: %sN(RIRR_RRatextracttsorttreverseRuRR`R~tchowntutimetchmodRjRmR(R}RRnt directoriesRtdirpathRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt extractallNs*      !  
R]cCs=|jdt|tr.|j|}n|}|jr^tjj||j|_ ny,|j |tjj||j d|Wnt k r}|j dkrq9|jdkr|jdd|jq9|jdd|j|jfn<tk r8}|j dkr!q9|jdd|nXdS(sxExtract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a TarInfo object. You can specify a different directory using `path'. File attributes (owner, mtime, mode) are set unless `set_attrs' is False. RYRiis tarfile: %sstarfile: %s %rN(RRRRRSRuRR`RRt_extract_memberR~tEnvironmentErrorRmtfilenameRIRtstrerrorRj(R}tmemberRRRRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRts&  ! #cCs|jdt|tr.|j|}n|}|jrP|j||S|jtkro|j||S|js|j rt|j t rt dq|j |j|SndSdS(sExtract a member from the archive as a file object. `member' may be a filename or a TarInfo object. If `member' is a regular file, a file-like object is returned. If `member' is a link, a file-like object is constructed from the link's target. If `member' is none of the above, None is returned. The file-like object is read-only and provides the following methods: read(), readline(), readlines(), seek() and tell() RYs'cannot extract (sym)link as file objectN(RRRRR,t fileobjectRR-RSRQRRRmt extractfilet_find_link_targetRI(R}RR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRs  cCs|jd}|jdtj}tjj|}|r_tjj| r_tj|n|jsw|j r|j dd|j |j fn|j d|j |j r|j||n|jr|j||n|jr |j||n|js"|jr5|j||n]|jsM|j r`|j||n2|jtkr|j||n|j|||r|j|||j s|j|||j||qndS(s\Extract the TarInfo object tarinfo to a physical file called targetpath. Ris%s -> %sN(RRRuRRtdirnameRgtmakedirsRSRQRR~RR,tmakefileRtmakedirRYtmakefifoRURWtmakedevtmakelinkRR-t makeunknownRRR(R}Rt targetpathRt upperdirs((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRs4#    cCsFytj|dWn+tk rA}|jtjkrBqBnXdS(s,Make a directory called targetpath. 
iN(RutmkdirRterrnotEEXIST(R}RRRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRs cCs|j}|j|jt|d}|jdk rqxJ|jD])\}}|j|t|||qAWnt|||j|j|j|j|j dS(s'Make a file called targetpath. RdN( RRRRhRRIRURttruncateR(R}RRtsourcettargetRR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRs   cCs+|j|||jdd|jdS(sYMake a file from a TarInfo object with an unknown type at targetpath. is9tarfile: Unknown file type %r, extracted as regular file.N(RRR(R}RR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s cCs/ttdrtj|n tddS(s'Make a fifo called targetpath. tmkfifosfifo not supported by systemN(RzRuRRj(R}RR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR scCsttd s ttd r/tdn|j}|jrT|tjO}n |tjO}tj||tj |j |j dS(s<Make a character or block device called targetpath. tmknodRs'special devices not supported by systemN( RzRuRjRaRWRtS_IFBLKtS_IFCHRRRRR(R}RRRa((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s     cCsyj|jr%tj|j|nDtjj|jrPtj|j|n|j|j ||WnPt k r|jrtjj tjj |j |j}q|j}n>Xy|j|j ||Wntk rtdnXdS(sMake a (symbolic) link called targetpath. If it cannot be created (platform limitation), we try to make a copy of the referenced file instead of a link. s%unable to resolve link inside archiveN(RQRutsymlinkRRRgRtlinkRRtsymlink_exceptionR`RR~RRj(R}RRR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR' s"       cCstrttdrtjdkrytj|jd}Wntk r]|j}nXytj |j d}Wntk r|j }nXyZ|j rttdrtj |||n%tjdkrtj|||nWqtk r}tdqXndS(s6Set owner of targetpath according to tarinfo. tgeteuidiitlchowntos2emxscould not change ownerN(RRzRuRRtgetgrnamRRRtgetpwnamRRRQRtsystplatformRRRj(R}RRRtuRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRD s '    cCsOttdrKytj||jWqKtk rG}tdqKXndS(sASet file permissions of targetpath according to tarinfo. 
Rscould not change modeN(RzRuRRaRRj(R}RRRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRZ s cCsYttdsdSy tj||j|jfWntk rT}tdnXdS(sBSet modification time of targetpath according to tarinfo. RNs"could not change modification time(RzRuRRRRj(R}RRRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRc s  cCs|jd|jdk r2|j}d|_|S|jj|jd}xktry|jj|}WnGt k r}|j r|j dd|j|f|jt 7_qNqnt k r+}|j r|j dd|j|f|jt 7_qNq|jdkrtt|qntk rY|jdkrtdqn[tk r}|jdkrtt|qn%tk r}tt|nXPqNW|dk r|jj|n t|_|S(sReturn the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available. trais0x%X: %sis empty fileN(RRqRIRRRRJRR&RqRkRRR0RkRRoRpRrRnR_Ro(R}tmRRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR0n sF          cCs|j}|dk r.||j| }n|rItjj|}nxKt|D]=}|rztjj|j}n |j}||krV|SqVWdS(s}Find an archive member by name from bottom to top. If tarinfo is given, it is used as the starting point. N(RRItindexRuRtnormpathtreversedR~(R}R~Rt normalizeRnRt member_name((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s    cCs6x&tr(|j}|dkrPqqWt|_dS(sWRead through the entire archive file and look for readable members. N(RJR0RIRo(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s    cCsW|jr"td|jjn|dk rS|j|krStd|jndS(snCheck if TarFile is still open, and if the operation's mode corresponds to TarFile's mode. s %s is closedsbad operation for mode %rN(RRNRRgRIRa(R}Ra((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s cCs|jr5tjj|jd|j}d}n|j}|}|j|d|dt}|dkr~t d|n|S(sZFind the target member of a symlink or hardlink member in the archive. RRRslinkname %r not foundN( RQRuRRR~RRIRRJR(R}RRtlimitR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s     cCs$|jrt|jSt|SdS(s$Provide an iterator object. 
N(RotiterRntTarIter(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s  cCs)||jkr%t|dtjndS(s.Write debugging output to sys.stderr. tfileN(RlRRtstderr(R}tleveltmsg((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR scCs|j|S(N(R(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt __enter__ s cCs?|dkr|jn"|js2|jjnt|_dS(N(RIRRRRJR(R}RRt traceback((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt__exit__ s    N(9RgRhRiRlRRjRkRmR^R>R_R$RIR%RRRRRR`RR{RwRRRvRRRRRRJRARRRRRRRRRRRRRRRR0RRRRRRRR(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR,sn  iK   b > &# & 0       1    RcBs/eZdZdZdZdZeZRS(sMIterator Class. for tarinfo in TarFile(...): suite... cCs||_d|_dS(s$Construct a TarIter object. iN(RR(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s cCs|S(s Return iterator object. ((R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR scCs}|jjs9|jj}|sjt|j_tqjn1y|jj|j}Wntk ritnX|jd7_|S(sReturn the next item using TarFile's next() method. When all members have been read, set TarFile as _loaded. i(RRoR0RJt StopIterationRnRt IndexError(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt__next__ s     (RgRhRiRRRR0(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s    cCs7yt|}|jtSWntk r2tSXdS(sfReturn True if name points to a tar archive that we are able to handle, else return False. 
N(R{RRJRR(R~R[((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR# s    (xt __future__Rt __version__tversiont __author__t__date__t __cvsid__t __credits__RRuRRRR8RR3RRRRIR~tNotImplementedErrorRt WindowsErrort NameErrort__all__t version_infot __builtin__tbuiltinsR{t_openR!RRRRRRR RRRRRPRTRVRRXtCONTTYPERRRRRR)RR7RR^R-RNRRItsetR;RR.RJtS_IFLNKtS_IFREGRtS_IFDIRRtS_IFIFOtTSUIDtTSGIDtTSVTXtTUREADtTUWRITEtTUEXECtTGREADtTGWRITEtTGEXECtTOREADtTOWRITEtTOEXECR~R_tgetfilesystemencodingR&R*R5R?RHRUR^Rft ExceptionRRjRkRlRmRnRoRpRqR0RrtobjectRsRRRRRRRRRRh(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyts.                                                 ?K* PK.e[DBCiidistlib/_backport/tarfile.pynu[#------------------------------------------------------------------- # tarfile.py #------------------------------------------------------------------- # Copyright (C) 2002 Lars Gustaebel # All rights reserved. # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # from __future__ import print_function """Read from and write to tar format archives. """ __version__ = "$Revision$" version = "0.9.0" __author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" __date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" __cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" __credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." #--------- # Imports #--------- import sys import os import stat import errno import time import struct import copy import re try: import grp, pwd except ImportError: grp = pwd = None # os.symlink on Windows prior to 6.0 raises NotImplementedError symlink_exception = (AttributeError, NotImplementedError) try: # WindowsError (1314) will be raised if the caller does not hold the # SeCreateSymbolicLinkPrivilege privilege symlink_exception += (WindowsError,) except NameError: pass # from tarfile import * __all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] if sys.version_info[0] < 3: import __builtin__ as builtins else: import builtins _open = builtins.open # Since 'open' is TarFile.open #--------------------------------------------------------- # tar constants #--------------------------------------------------------- NUL = b"\0" # the null character BLOCKSIZE = 512 # length of processing blocks RECORDSIZE = BLOCKSIZE * 20 # length of records GNU_MAGIC = b"ustar \0" # magic gnu tar string POSIX_MAGIC = b"ustar\x0000" # magic posix tar string LENGTH_NAME = 100 # maximum length of a filename LENGTH_LINK = 100 # maximum length of a linkname LENGTH_PREFIX = 155 # maximum length of the prefix field REGTYPE = b"0" # regular file AREGTYPE = b"\0" # regular file LNKTYPE = b"1" # link (inside tarfile) SYMTYPE 
= b"2" # symbolic link CHRTYPE = b"3" # character special device BLKTYPE = b"4" # block special device DIRTYPE = b"5" # directory FIFOTYPE = b"6" # fifo special device CONTTYPE = b"7" # contiguous file GNUTYPE_LONGNAME = b"L" # GNU tar longname GNUTYPE_LONGLINK = b"K" # GNU tar longlink GNUTYPE_SPARSE = b"S" # GNU tar sparse file XHDTYPE = b"x" # POSIX.1-2001 extended header XGLTYPE = b"g" # POSIX.1-2001 global header SOLARIS_XHDTYPE = b"X" # Solaris extended header USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format GNU_FORMAT = 1 # GNU tar format PAX_FORMAT = 2 # POSIX.1-2001 (pax) format DEFAULT_FORMAT = GNU_FORMAT #--------------------------------------------------------- # tarfile constants #--------------------------------------------------------- # File types that tarfile supports: SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, GNUTYPE_SPARSE) # File types that will be treated as a regular file. REGULAR_TYPES = (REGTYPE, AREGTYPE, CONTTYPE, GNUTYPE_SPARSE) # File types that are part of the GNU tar format. GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, GNUTYPE_SPARSE) # Fields from a pax header that override a TarInfo attribute. PAX_FIELDS = ("path", "linkpath", "size", "mtime", "uid", "gid", "uname", "gname") # Fields from a pax header that are affected by hdrcharset. PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) # Fields in a pax header that are numbers, all other fields # are treated as strings. PAX_NUMBER_FIELDS = { "atime": float, "ctime": float, "mtime": float, "uid": int, "gid": int, "size": int } #--------------------------------------------------------- # Bits used in the mode field, values in octal. 
#--------------------------------------------------------- S_IFLNK = 0o120000 # symbolic link S_IFREG = 0o100000 # regular file S_IFBLK = 0o060000 # block device S_IFDIR = 0o040000 # directory S_IFCHR = 0o020000 # character device S_IFIFO = 0o010000 # fifo TSUID = 0o4000 # set UID on execution TSGID = 0o2000 # set GID on execution TSVTX = 0o1000 # reserved TUREAD = 0o400 # read by owner TUWRITE = 0o200 # write by owner TUEXEC = 0o100 # execute/search by owner TGREAD = 0o040 # read by group TGWRITE = 0o020 # write by group TGEXEC = 0o010 # execute/search by group TOREAD = 0o004 # read by other TOWRITE = 0o002 # write by other TOEXEC = 0o001 # execute/search by other #--------------------------------------------------------- # initialization #--------------------------------------------------------- if os.name in ("nt", "ce"): ENCODING = "utf-8" else: ENCODING = sys.getfilesystemencoding() #--------------------------------------------------------- # Some useful functions #--------------------------------------------------------- def stn(s, length, encoding, errors): """Convert a string to a null-terminated bytes object. """ s = s.encode(encoding, errors) return s[:length] + (length - len(s)) * NUL def nts(s, encoding, errors): """Convert a null-terminated bytes object to a string. """ p = s.find(b"\0") if p != -1: s = s[:p] return s.decode(encoding, errors) def nti(s): """Convert a number field to a python number. """ # There are two possible encodings for a number field, see # itn() below. if s[0] != chr(0o200): try: n = int(nts(s, "ascii", "strict") or "0", 8) except ValueError: raise InvalidHeaderError("invalid header") else: n = 0 for i in range(len(s) - 1): n <<= 8 n += ord(s[i + 1]) return n def itn(n, digits=8, format=DEFAULT_FORMAT): """Convert a python number to a number field. """ # POSIX 1003.1-1988 requires numbers to be encoded as a string of # octal digits followed by a null-byte, this allows values up to # (8**(digits-1))-1. 
GNU tar allows storing numbers greater than # that if necessary. A leading 0o200 byte indicates this particular # encoding, the following digits-1 bytes are a big-endian # representation. This allows values up to (256**(digits-1))-1. if 0 <= n < 8 ** (digits - 1): s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL else: if format != GNU_FORMAT or n >= 256 ** (digits - 1): raise ValueError("overflow in number field") if n < 0: # XXX We mimic GNU tar's behaviour with negative numbers, # this could raise OverflowError. n = struct.unpack("L", struct.pack("l", n))[0] s = bytearray() for i in range(digits - 1): s.insert(0, n & 0o377) n >>= 8 s.insert(0, 0o200) return s def calc_chksums(buf): """Calculate the checksum for a member's header by summing up all characters except for the chksum field which is treated as if it was filled with spaces. According to the GNU tar sources, some tars (Sun and NeXT) calculate chksum with signed char, which will be different if there are chars in the buffer with the high bit set. So we calculate two checksums, unsigned and signed. """ unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) return unsigned_chksum, signed_chksum def copyfileobj(src, dst, length=None): """Copy length bytes from fileobj src to fileobj dst. If length is None, copy the entire content. 
""" if length == 0: return if length is None: while True: buf = src.read(16*1024) if not buf: break dst.write(buf) return BUFSIZE = 16 * 1024 blocks, remainder = divmod(length, BUFSIZE) for b in range(blocks): buf = src.read(BUFSIZE) if len(buf) < BUFSIZE: raise IOError("end of file reached") dst.write(buf) if remainder != 0: buf = src.read(remainder) if len(buf) < remainder: raise IOError("end of file reached") dst.write(buf) return filemode_table = ( ((S_IFLNK, "l"), (S_IFREG, "-"), (S_IFBLK, "b"), (S_IFDIR, "d"), (S_IFCHR, "c"), (S_IFIFO, "p")), ((TUREAD, "r"),), ((TUWRITE, "w"),), ((TUEXEC|TSUID, "s"), (TSUID, "S"), (TUEXEC, "x")), ((TGREAD, "r"),), ((TGWRITE, "w"),), ((TGEXEC|TSGID, "s"), (TSGID, "S"), (TGEXEC, "x")), ((TOREAD, "r"),), ((TOWRITE, "w"),), ((TOEXEC|TSVTX, "t"), (TSVTX, "T"), (TOEXEC, "x")) ) def filemode(mode): """Convert a file's mode to a string of the form -rwxrwxrwx. Used by TarFile.list() """ perm = [] for table in filemode_table: for bit, char in table: if mode & bit == bit: perm.append(char) break else: perm.append("-") return "".join(perm) class TarError(Exception): """Base exception.""" pass class ExtractError(TarError): """General exception for extract errors.""" pass class ReadError(TarError): """Exception for unreadable tar archives.""" pass class CompressionError(TarError): """Exception for unavailable compression methods.""" pass class StreamError(TarError): """Exception for unsupported operations on stream-like TarFiles.""" pass class HeaderError(TarError): """Base exception for header errors.""" pass class EmptyHeaderError(HeaderError): """Exception for empty headers.""" pass class TruncatedHeaderError(HeaderError): """Exception for truncated headers.""" pass class EOFHeaderError(HeaderError): """Exception for end of file headers.""" pass class InvalidHeaderError(HeaderError): """Exception for invalid headers.""" pass class SubsequentHeaderError(HeaderError): """Exception for missing and invalid extended headers.""" pass 
#--------------------------- # internal stream interface #--------------------------- class _LowLevelFile(object): """Low-level file object. Supports reading and writing. It is used instead of a regular file object for streaming access. """ def __init__(self, name, mode): mode = { "r": os.O_RDONLY, "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, }[mode] if hasattr(os, "O_BINARY"): mode |= os.O_BINARY self.fd = os.open(name, mode, 0o666) def close(self): os.close(self.fd) def read(self, size): return os.read(self.fd, size) def write(self, s): os.write(self.fd, s) class _Stream(object): """Class that serves as an adapter between TarFile and a stream-like object. The stream-like object only needs to have a read() or write() method and is accessed blockwise. Use of gzip or bzip2 compression is possible. A stream-like object could be for example: sys.stdin, sys.stdout, a socket, a tape device etc. _Stream is intended to be used only internally. """ def __init__(self, name, mode, comptype, fileobj, bufsize): """Construct a _Stream object. 
""" self._extfileobj = True if fileobj is None: fileobj = _LowLevelFile(name, mode) self._extfileobj = False if comptype == '*': # Enable transparent compression detection for the # stream interface fileobj = _StreamProxy(fileobj) comptype = fileobj.getcomptype() self.name = name or "" self.mode = mode self.comptype = comptype self.fileobj = fileobj self.bufsize = bufsize self.buf = b"" self.pos = 0 self.closed = False try: if comptype == "gz": try: import zlib except ImportError: raise CompressionError("zlib module is not available") self.zlib = zlib self.crc = zlib.crc32(b"") if mode == "r": self._init_read_gz() else: self._init_write_gz() if comptype == "bz2": try: import bz2 except ImportError: raise CompressionError("bz2 module is not available") if mode == "r": self.dbuf = b"" self.cmp = bz2.BZ2Decompressor() else: self.cmp = bz2.BZ2Compressor() except: if not self._extfileobj: self.fileobj.close() self.closed = True raise def __del__(self): if hasattr(self, "closed") and not self.closed: self.close() def _init_write_gz(self): """Initialize for writing with gzip compression. """ self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0) timestamp = struct.pack(" self.bufsize: self.fileobj.write(self.buf[:self.bufsize]) self.buf = self.buf[self.bufsize:] def close(self): """Close the _Stream object. No operation should be done on it afterwards. """ if self.closed: return if self.mode == "w" and self.comptype != "tar": self.buf += self.cmp.flush() if self.mode == "w" and self.buf: self.fileobj.write(self.buf) self.buf = b"" if self.comptype == "gz": # The native zlib crc is an unsigned 32-bit integer, but # the Python wrapper implicitly casts that to a signed C # long. So, on a 32-bit box self.crc may "look negative", # while the same crc on a 64-bit box may "look positive". # To avoid irksome warnings from the `struct` module, force # it to look positive on all boxes. 
self.fileobj.write(struct.pack("= 0: blocks, remainder = divmod(pos - self.pos, self.bufsize) for i in range(blocks): self.read(self.bufsize) self.read(remainder) else: raise StreamError("seeking backwards is not allowed") return self.pos def read(self, size=None): """Return the next size number of bytes from the stream. If size is not defined, return all bytes of the stream up to EOF. """ if size is None: t = [] while True: buf = self._read(self.bufsize) if not buf: break t.append(buf) buf = "".join(t) else: buf = self._read(size) self.pos += len(buf) return buf def _read(self, size): """Return size bytes from the stream. """ if self.comptype == "tar": return self.__read(size) c = len(self.dbuf) while c < size: buf = self.__read(self.bufsize) if not buf: break try: buf = self.cmp.decompress(buf) except IOError: raise ReadError("invalid compressed data") self.dbuf += buf c += len(buf) buf = self.dbuf[:size] self.dbuf = self.dbuf[size:] return buf def __read(self, size): """Return size bytes from stream. If internal buffer is empty, read another block from the stream. """ c = len(self.buf) while c < size: buf = self.fileobj.read(self.bufsize) if not buf: break self.buf += buf c += len(buf) buf = self.buf[:size] self.buf = self.buf[size:] return buf # class _Stream class _StreamProxy(object): """Small proxy class that enables transparent compression detection for the Stream interface (mode 'r|*'). """ def __init__(self, fileobj): self.fileobj = fileobj self.buf = self.fileobj.read(BLOCKSIZE) def read(self, size): self.read = self.fileobj.read return self.buf def getcomptype(self): if self.buf.startswith(b"\037\213\010"): return "gz" if self.buf.startswith(b"BZh91"): return "bz2" return "tar" def close(self): self.fileobj.close() # class StreamProxy class _BZ2Proxy(object): """Small proxy class that enables external file object support for "r:bz2" and "w:bz2" modes. 
This is actually a workaround for a limitation in bz2 module's BZ2File class which (unlike gzip.GzipFile) has no support for a file object argument. """ blocksize = 16 * 1024 def __init__(self, fileobj, mode): self.fileobj = fileobj self.mode = mode self.name = getattr(self.fileobj, "name", None) self.init() def init(self): import bz2 self.pos = 0 if self.mode == "r": self.bz2obj = bz2.BZ2Decompressor() self.fileobj.seek(0) self.buf = b"" else: self.bz2obj = bz2.BZ2Compressor() def read(self, size): x = len(self.buf) while x < size: raw = self.fileobj.read(self.blocksize) if not raw: break data = self.bz2obj.decompress(raw) self.buf += data x += len(data) buf = self.buf[:size] self.buf = self.buf[size:] self.pos += len(buf) return buf def seek(self, pos): if pos < self.pos: self.init() self.read(pos - self.pos) def tell(self): return self.pos def write(self, data): self.pos += len(data) raw = self.bz2obj.compress(data) self.fileobj.write(raw) def close(self): if self.mode == "w": raw = self.bz2obj.flush() self.fileobj.write(raw) # class _BZ2Proxy #------------------------ # Extraction file object #------------------------ class _FileInFile(object): """A thin wrapper around an existing file object that provides a part of its data as an individual file object. """ def __init__(self, fileobj, offset, size, blockinfo=None): self.fileobj = fileobj self.offset = offset self.size = size self.position = 0 if blockinfo is None: blockinfo = [(0, size)] # Construct a map with data and zero blocks. 
self.map_index = 0 self.map = [] lastpos = 0 realpos = self.offset for offset, size in blockinfo: if offset > lastpos: self.map.append((False, lastpos, offset, None)) self.map.append((True, offset, offset + size, realpos)) realpos += size lastpos = offset + size if lastpos < self.size: self.map.append((False, lastpos, self.size, None)) def seekable(self): if not hasattr(self.fileobj, "seekable"): # XXX gzip.GzipFile and bz2.BZ2File return True return self.fileobj.seekable() def tell(self): """Return the current file position. """ return self.position def seek(self, position): """Seek to a position in the file. """ self.position = position def read(self, size=None): """Read data from the file. """ if size is None: size = self.size - self.position else: size = min(size, self.size - self.position) buf = b"" while size > 0: while True: data, start, stop, offset = self.map[self.map_index] if start <= self.position < stop: break else: self.map_index += 1 if self.map_index == len(self.map): self.map_index = 0 length = min(size, stop - self.position) if data: self.fileobj.seek(offset + (self.position - start)) buf += self.fileobj.read(length) else: buf += NUL * length size -= length self.position += length return buf #class _FileInFile class ExFileObject(object): """File-like object for reading an archive member. Is returned by TarFile.extractfile(). """ blocksize = 1024 def __init__(self, tarfile, tarinfo): self.fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data, tarinfo.size, tarinfo.sparse) self.name = tarinfo.name self.mode = "r" self.closed = False self.size = tarinfo.size self.position = 0 self.buffer = b"" def readable(self): return True def writable(self): return False def seekable(self): return self.fileobj.seekable() def read(self, size=None): """Read at most size bytes from the file. If size is not present or None, read all data until EOF is reached. 
""" if self.closed: raise ValueError("I/O operation on closed file") buf = b"" if self.buffer: if size is None: buf = self.buffer self.buffer = b"" else: buf = self.buffer[:size] self.buffer = self.buffer[size:] if size is None: buf += self.fileobj.read() else: buf += self.fileobj.read(size - len(buf)) self.position += len(buf) return buf # XXX TextIOWrapper uses the read1() method. read1 = read def readline(self, size=-1): """Read one entire line from the file. If size is present and non-negative, return a string with at most that size, which may be an incomplete line. """ if self.closed: raise ValueError("I/O operation on closed file") pos = self.buffer.find(b"\n") + 1 if pos == 0: # no newline found. while True: buf = self.fileobj.read(self.blocksize) self.buffer += buf if not buf or b"\n" in buf: pos = self.buffer.find(b"\n") + 1 if pos == 0: # no newline found. pos = len(self.buffer) break if size != -1: pos = min(size, pos) buf = self.buffer[:pos] self.buffer = self.buffer[pos:] self.position += len(buf) return buf def readlines(self): """Return a list with all remaining lines. """ result = [] while True: line = self.readline() if not line: break result.append(line) return result def tell(self): """Return the current file position. """ if self.closed: raise ValueError("I/O operation on closed file") return self.position def seek(self, pos, whence=os.SEEK_SET): """Seek to a position in the file. """ if self.closed: raise ValueError("I/O operation on closed file") if whence == os.SEEK_SET: self.position = min(max(pos, 0), self.size) elif whence == os.SEEK_CUR: if pos < 0: self.position = max(self.position + pos, 0) else: self.position = min(self.position + pos, self.size) elif whence == os.SEEK_END: self.position = max(min(self.size + pos, self.size), 0) else: raise ValueError("Invalid argument") self.buffer = b"" self.fileobj.seek(self.position) def close(self): """Close the file object. 
""" self.closed = True def __iter__(self): """Get an iterator over the file's lines. """ while True: line = self.readline() if not line: break yield line #class ExFileObject #------------------ # Exported Classes #------------------ class TarInfo(object): """Informational class which holds the details about an archive member given by a tar header block. TarInfo objects are returned by TarFile.getmember(), TarFile.getmembers() and TarFile.gettarinfo() and are usually created internally. """ __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", "chksum", "type", "linkname", "uname", "gname", "devmajor", "devminor", "offset", "offset_data", "pax_headers", "sparse", "tarfile", "_sparse_structs", "_link_target") def __init__(self, name=""): """Construct a TarInfo object. name is the optional name of the member. """ self.name = name # member name self.mode = 0o644 # file permissions self.uid = 0 # user id self.gid = 0 # group id self.size = 0 # file size self.mtime = 0 # modification time self.chksum = 0 # header checksum self.type = REGTYPE # member type self.linkname = "" # link name self.uname = "" # user name self.gname = "" # group name self.devmajor = 0 # device major number self.devminor = 0 # device minor number self.offset = 0 # the tar header starts here self.offset_data = 0 # the file's data starts here self.sparse = None # sparse member information self.pax_headers = {} # pax header information # In pax headers the "name" and "linkname" field are called # "path" and "linkpath". def _getpath(self): return self.name def _setpath(self, name): self.name = name path = property(_getpath, _setpath) def _getlinkpath(self): return self.linkname def _setlinkpath(self, linkname): self.linkname = linkname linkpath = property(_getlinkpath, _setlinkpath) def __repr__(self): return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) def get_info(self): """Return the TarInfo's attributes as a dictionary. 
""" info = { "name": self.name, "mode": self.mode & 0o7777, "uid": self.uid, "gid": self.gid, "size": self.size, "mtime": self.mtime, "chksum": self.chksum, "type": self.type, "linkname": self.linkname, "uname": self.uname, "gname": self.gname, "devmajor": self.devmajor, "devminor": self.devminor } if info["type"] == DIRTYPE and not info["name"].endswith("/"): info["name"] += "/" return info def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): """Return a tar header as a string of 512 byte blocks. """ info = self.get_info() if format == USTAR_FORMAT: return self.create_ustar_header(info, encoding, errors) elif format == GNU_FORMAT: return self.create_gnu_header(info, encoding, errors) elif format == PAX_FORMAT: return self.create_pax_header(info, encoding) else: raise ValueError("invalid format") def create_ustar_header(self, info, encoding, errors): """Return the object as a ustar header block. """ info["magic"] = POSIX_MAGIC if len(info["linkname"]) > LENGTH_LINK: raise ValueError("linkname is too long") if len(info["name"]) > LENGTH_NAME: info["prefix"], info["name"] = self._posix_split_name(info["name"]) return self._create_header(info, USTAR_FORMAT, encoding, errors) def create_gnu_header(self, info, encoding, errors): """Return the object as a GNU header block sequence. """ info["magic"] = GNU_MAGIC buf = b"" if len(info["linkname"]) > LENGTH_LINK: buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) if len(info["name"]) > LENGTH_NAME: buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) return buf + self._create_header(info, GNU_FORMAT, encoding, errors) def create_pax_header(self, info, encoding): """Return the object as a ustar header block. If it cannot be represented this way, prepend a pax extended header sequence with supplement information. 
""" info["magic"] = POSIX_MAGIC pax_headers = self.pax_headers.copy() # Test string fields for values that exceed the field length or cannot # be represented in ASCII encoding. for name, hname, length in ( ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), ("uname", "uname", 32), ("gname", "gname", 32)): if hname in pax_headers: # The pax header has priority. continue # Try to encode the string as ASCII. try: info[name].encode("ascii", "strict") except UnicodeEncodeError: pax_headers[hname] = info[name] continue if len(info[name]) > length: pax_headers[hname] = info[name] # Test number fields for values that exceed the field limit or values # that like to be stored as float. for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): if name in pax_headers: # The pax header has priority. Avoid overflow. info[name] = 0 continue val = info[name] if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): pax_headers[name] = str(val) info[name] = 0 # Create a pax extended header if necessary. if pax_headers: buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) else: buf = b"" return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") @classmethod def create_pax_global_header(cls, pax_headers): """Return the object as a pax global header block sequence. """ return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") def _posix_split_name(self, name): """Split a name longer than 100 chars into a prefix and a name part. """ prefix = name[:LENGTH_PREFIX + 1] while prefix and prefix[-1] != "/": prefix = prefix[:-1] name = name[len(prefix):] prefix = prefix[:-1] if not prefix or len(name) > LENGTH_NAME: raise ValueError("name is too long") return prefix, name @staticmethod def _create_header(info, format, encoding, errors): """Return a header block. info is a dictionary with file information, format must be one of the *_FORMAT constants. 
""" parts = [ stn(info.get("name", ""), 100, encoding, errors), itn(info.get("mode", 0) & 0o7777, 8, format), itn(info.get("uid", 0), 8, format), itn(info.get("gid", 0), 8, format), itn(info.get("size", 0), 12, format), itn(info.get("mtime", 0), 12, format), b" ", # checksum field info.get("type", REGTYPE), stn(info.get("linkname", ""), 100, encoding, errors), info.get("magic", POSIX_MAGIC), stn(info.get("uname", ""), 32, encoding, errors), stn(info.get("gname", ""), 32, encoding, errors), itn(info.get("devmajor", 0), 8, format), itn(info.get("devminor", 0), 8, format), stn(info.get("prefix", ""), 155, encoding, errors) ] buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) chksum = calc_chksums(buf[-BLOCKSIZE:])[0] buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] return buf @staticmethod def _create_payload(payload): """Return the string payload filled with zero bytes up to the next 512 byte border. """ blocks, remainder = divmod(len(payload), BLOCKSIZE) if remainder > 0: payload += (BLOCKSIZE - remainder) * NUL return payload @classmethod def _create_gnu_long_header(cls, name, type, encoding, errors): """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence for name. """ name = name.encode(encoding, errors) + NUL info = {} info["name"] = "././@LongLink" info["type"] = type info["size"] = len(name) info["magic"] = GNU_MAGIC # create extended header + name blocks. return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ cls._create_payload(name) @classmethod def _create_pax_generic_header(cls, pax_headers, type, encoding): """Return a POSIX.1-2008 extended or global header sequence that contains a list of keyword, value pairs. The values must be strings. """ # Check if one of the fields contains surrogate characters and thereby # forces hdrcharset=BINARY, see _proc_pax() for more information. 
binary = False for keyword, value in pax_headers.items(): try: value.encode("utf8", "strict") except UnicodeEncodeError: binary = True break records = b"" if binary: # Put the hdrcharset field at the beginning of the header. records += b"21 hdrcharset=BINARY\n" for keyword, value in pax_headers.items(): keyword = keyword.encode("utf8") if binary: # Try to restore the original byte representation of `value'. # Needless to say, that the encoding must match the string. value = value.encode(encoding, "surrogateescape") else: value = value.encode("utf8") l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' n = p = 0 while True: n = l + len(str(p)) if n == p: break p = n records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" # We use a hardcoded "././@PaxHeader" name like star does # instead of the one that POSIX recommends. info = {} info["name"] = "././@PaxHeader" info["type"] = type info["size"] = len(records) info["magic"] = POSIX_MAGIC # Create pax header + record blocks. return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ cls._create_payload(records) @classmethod def frombuf(cls, buf, encoding, errors): """Construct a TarInfo object from a 512 byte bytes object. 
""" if len(buf) == 0: raise EmptyHeaderError("empty header") if len(buf) != BLOCKSIZE: raise TruncatedHeaderError("truncated header") if buf.count(NUL) == BLOCKSIZE: raise EOFHeaderError("end of file header") chksum = nti(buf[148:156]) if chksum not in calc_chksums(buf): raise InvalidHeaderError("bad checksum") obj = cls() obj.name = nts(buf[0:100], encoding, errors) obj.mode = nti(buf[100:108]) obj.uid = nti(buf[108:116]) obj.gid = nti(buf[116:124]) obj.size = nti(buf[124:136]) obj.mtime = nti(buf[136:148]) obj.chksum = chksum obj.type = buf[156:157] obj.linkname = nts(buf[157:257], encoding, errors) obj.uname = nts(buf[265:297], encoding, errors) obj.gname = nts(buf[297:329], encoding, errors) obj.devmajor = nti(buf[329:337]) obj.devminor = nti(buf[337:345]) prefix = nts(buf[345:500], encoding, errors) # Old V7 tar format represents a directory as a regular # file with a trailing slash. if obj.type == AREGTYPE and obj.name.endswith("/"): obj.type = DIRTYPE # The old GNU sparse format occupies some of the unused # space in the buffer for up to 4 sparse structures. # Save the them for later processing in _proc_sparse(). if obj.type == GNUTYPE_SPARSE: pos = 386 structs = [] for i in range(4): try: offset = nti(buf[pos:pos + 12]) numbytes = nti(buf[pos + 12:pos + 24]) except ValueError: break structs.append((offset, numbytes)) pos += 24 isextended = bool(buf[482]) origsize = nti(buf[483:495]) obj._sparse_structs = (structs, isextended, origsize) # Remove redundant slashes from directories. if obj.isdir(): obj.name = obj.name.rstrip("/") # Reconstruct a ustar longname. if prefix and obj.type not in GNU_TYPES: obj.name = prefix + "/" + obj.name return obj @classmethod def fromtarfile(cls, tarfile): """Return the next TarInfo object from TarFile object tarfile. 
""" buf = tarfile.fileobj.read(BLOCKSIZE) obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) obj.offset = tarfile.fileobj.tell() - BLOCKSIZE return obj._proc_member(tarfile) #-------------------------------------------------------------------------- # The following are methods that are called depending on the type of a # member. The entry point is _proc_member() which can be overridden in a # subclass to add custom _proc_*() methods. A _proc_*() method MUST # implement the following # operations: # 1. Set self.offset_data to the position where the data blocks begin, # if there is data that follows. # 2. Set tarfile.offset to the position where the next member's header will # begin. # 3. Return self or another valid TarInfo object. def _proc_member(self, tarfile): """Choose the right processing method depending on the type and call it. """ if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): return self._proc_gnulong(tarfile) elif self.type == GNUTYPE_SPARSE: return self._proc_sparse(tarfile) elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): return self._proc_pax(tarfile) else: return self._proc_builtin(tarfile) def _proc_builtin(self, tarfile): """Process a builtin type or an unknown type which will be treated as a regular file. """ self.offset_data = tarfile.fileobj.tell() offset = self.offset_data if self.isreg() or self.type not in SUPPORTED_TYPES: # Skip the following data blocks. offset += self._block(self.size) tarfile.offset = offset # Patch the TarInfo object with saved global # header information. self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) return self def _proc_gnulong(self, tarfile): """Process the blocks that hold a GNU longname or longlink member. """ buf = tarfile.fileobj.read(self._block(self.size)) # Fetch the next header and process it. 
try: next = self.fromtarfile(tarfile) except HeaderError: raise SubsequentHeaderError("missing or bad subsequent header") # Patch the TarInfo object from the next header with # the longname information. next.offset = self.offset if self.type == GNUTYPE_LONGNAME: next.name = nts(buf, tarfile.encoding, tarfile.errors) elif self.type == GNUTYPE_LONGLINK: next.linkname = nts(buf, tarfile.encoding, tarfile.errors) return next def _proc_sparse(self, tarfile): """Process a GNU sparse header plus extra headers. """ # We already collected some sparse structures in frombuf(). structs, isextended, origsize = self._sparse_structs del self._sparse_structs # Collect sparse structures from extended header blocks. while isextended: buf = tarfile.fileobj.read(BLOCKSIZE) pos = 0 for i in range(21): try: offset = nti(buf[pos:pos + 12]) numbytes = nti(buf[pos + 12:pos + 24]) except ValueError: break if offset and numbytes: structs.append((offset, numbytes)) pos += 24 isextended = bool(buf[504]) self.sparse = structs self.offset_data = tarfile.fileobj.tell() tarfile.offset = self.offset_data + self._block(self.size) self.size = origsize return self def _proc_pax(self, tarfile): """Process an extended or global header as described in POSIX.1-2008. """ # Read the header information. buf = tarfile.fileobj.read(self._block(self.size)) # A pax header stores supplemental information for either # the following file (extended) or all following files # (global). if self.type == XGLTYPE: pax_headers = tarfile.pax_headers else: pax_headers = tarfile.pax_headers.copy() # Check if the pax header contains a hdrcharset field. This tells us # the encoding of the path, linkpath, uname and gname fields. Normally, # these fields are UTF-8 encoded but since POSIX.1-2008 tar # implementations are allowed to store them as raw binary strings if # the translation to UTF-8 fails. 
        match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
        if match is not None:
            pax_headers["hdrcharset"] = match.group(1).decode("utf8")

        # For the time being, we don't care about anything other than "BINARY".
        # The only other value that is currently allowed by the standard is
        # "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
        hdrcharset = pax_headers.get("hdrcharset")
        if hdrcharset == "BINARY":
            encoding = tarfile.encoding
        else:
            encoding = "utf8"

        # Parse pax header information. A record looks like that:
        # "%d %s=%s\n" % (length, keyword, value). length is the size
        # of the complete record including the length field itself and
        # the newline. keyword and value are both UTF-8 encoded strings.
        regex = re.compile(br"(\d+) ([^=]+)=")
        pos = 0
        while True:
            match = regex.match(buf, pos)
            if not match:
                break

            length, keyword = match.groups()
            length = int(length)
            # value spans from just past the "=" up to (but excluding) the
            # trailing newline; the record length is counted from its start.
            value = buf[match.end(2) + 1:match.start(1) + length - 1]

            # Normally, we could just use "utf8" as the encoding and "strict"
            # as the error handler, but we better not take the risk. For
            # example, GNU tar <= 1.23 is known to store filenames it cannot
            # translate to UTF-8 as raw strings (unfortunately without a
            # hdrcharset=BINARY header).
            # We first try the strict standard encoding, and if that fails we
            # fall back on the user's encoding and error handler.
            keyword = self._decode_pax_field(keyword, "utf8", "utf8",
                                             tarfile.errors)
            if keyword in PAX_NAME_FIELDS:
                value = self._decode_pax_field(value, encoding,
                                               tarfile.encoding,
                                               tarfile.errors)
            else:
                value = self._decode_pax_field(value, "utf8", "utf8",
                                               tarfile.errors)

            pax_headers[keyword] = value
            pos += length

        # Fetch the next header.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError("missing or bad subsequent header")

        # Process GNU sparse information.
        if "GNU.sparse.map" in pax_headers:
            # GNU extended sparse format version 0.1.
            self._proc_gnusparse_01(next, pax_headers)

        elif "GNU.sparse.size" in pax_headers:
            # GNU extended sparse format version 0.0.
            self._proc_gnusparse_00(next, pax_headers, buf)

        elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
            # GNU extended sparse format version 1.0.
            self._proc_gnusparse_10(next, pax_headers, tarfile)

        if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
            # Patch the TarInfo object with the extended header info.
            next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
            next.offset = self.offset

            if "size" in pax_headers:
                # If the extended header replaces the size field,
                # we need to recalculate the offset where the next
                # header starts.
                offset = next.offset_data
                if next.isreg() or next.type not in SUPPORTED_TYPES:
                    offset += next._block(next.size)
                tarfile.offset = offset

        return next

    def _proc_gnusparse_00(self, next, pax_headers, buf):
        """Process a GNU tar extended sparse header, version 0.0.

           Offsets and sizes are stored as separate repeated
           GNU.sparse.offset / GNU.sparse.numbytes records in the raw
           pax buffer, so they are re-scanned here with regexes.
        """
        offsets = []
        for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
            offsets.append(int(match.group(1)))
        numbytes = []
        for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
            numbytes.append(int(match.group(1)))
        next.sparse = list(zip(offsets, numbytes))

    def _proc_gnusparse_01(self, next, pax_headers):
        """Process a GNU tar extended sparse header, version 0.1.

           The map is a single comma-separated list of alternating
           offset and size values.
        """
        sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
        next.sparse = list(zip(sparse[::2], sparse[1::2]))

    def _proc_gnusparse_10(self, next, pax_headers, tarfile):
        """Process a GNU tar extended sparse header, version 1.0.

           The sparse map precedes the file data: a decimal entry count,
           then newline-separated offset/size values, read block by block.
        """
        fields = None
        sparse = []
        buf = tarfile.fileobj.read(BLOCKSIZE)
        fields, buf = buf.split(b"\n", 1)
        fields = int(fields)
        while len(sparse) < fields * 2:
            if b"\n" not in buf:
                # Map spills over into the next block.
                buf += tarfile.fileobj.read(BLOCKSIZE)
            number, buf = buf.split(b"\n", 1)
            sparse.append(int(number))
        next.offset_data = tarfile.fileobj.tell()
        next.sparse = list(zip(sparse[::2], sparse[1::2]))

    def _apply_pax_info(self, pax_headers, encoding, errors):
        """Replace fields with supplemental information from a previous
           pax extended or global header.
        """
        for keyword, value in pax_headers.items():
            if keyword == "GNU.sparse.name":
                setattr(self, "path", value)
            elif keyword == "GNU.sparse.size":
                setattr(self, "size", int(value))
            elif keyword == "GNU.sparse.realsize":
                setattr(self, "size", int(value))
            elif keyword in PAX_FIELDS:
                if keyword in PAX_NUMBER_FIELDS:
                    try:
                        value = PAX_NUMBER_FIELDS[keyword](value)
                    except ValueError:
                        # Unparsable numeric field: fall back to 0 rather
                        # than failing the whole member.
                        value = 0
                if keyword == "path":
                    value = value.rstrip("/")
                setattr(self, keyword, value)

        self.pax_headers = pax_headers.copy()

    def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
        """Decode a single field from a pax record.

           Try the primary encoding strictly first; on failure, retry with
           the fallback encoding and error handler.
        """
        try:
            return value.decode(encoding, "strict")
        except UnicodeDecodeError:
            return value.decode(fallback_encoding, fallback_errors)

    def _block(self, count):
        """Round up a byte count by BLOCKSIZE and return it,
           e.g. _block(834) => 1024.
        """
        blocks, remainder = divmod(count, BLOCKSIZE)
        if remainder:
            blocks += 1
        return blocks * BLOCKSIZE

    # Type predicates: classify this member by its header type flag.
    def isreg(self):
        return self.type in REGULAR_TYPES
    def isfile(self):
        return self.isreg()
    def isdir(self):
        return self.type == DIRTYPE
    def issym(self):
        return self.type == SYMTYPE
    def islnk(self):
        return self.type == LNKTYPE
    def ischr(self):
        return self.type == CHRTYPE
    def isblk(self):
        return self.type == BLKTYPE
    def isfifo(self):
        return self.type == FIFOTYPE
    def issparse(self):
        return self.sparse is not None
    def isdev(self):
        return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo

class TarFile(object):
    """The TarFile Class provides an interface to tar archives.
    """

    debug = 0                   # May be set from 0 (no msgs) to 3 (all msgs)

    dereference = False         # If true, add content of linked file to the
                                # tar file, else the link.

    ignore_zeros = False        # If true, skips empty or invalid blocks and
                                # continues processing.

    errorlevel = 1              # If 0, fatal errors only appear in debug
                                # messages (if debug >= 0). If > 0, errors
                                # are passed to the caller as exceptions.

    format = DEFAULT_FORMAT     # The format to use when creating an archive.

    encoding = ENCODING         # Encoding for 8-bit character strings.

    errors = None               # Error handler for unicode conversion.

    tarinfo = TarInfo           # The default TarInfo class to use.

    fileobject = ExFileObject   # The default ExFileObject class to use.

    def __init__(self, name=None, mode="r", fileobj=None, format=None,
            tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
            errors="surrogateescape", pax_headers=None, debug=None,
            errorlevel=None):
        """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
           read from an existing archive, 'a' to append data to an existing
           file or 'w' to create a new file overwriting an existing one. `mode'
           defaults to 'r'.
           If `fileobj' is given, it is used for reading or writing data. If it
           can be determined, `mode' is overridden by `fileobj's mode.
           `fileobj' is not closed, when TarFile is closed.
        """
        if len(mode) > 1 or mode not in "raw":
            raise ValueError("mode must be 'r', 'a' or 'w'")
        self.mode = mode
        self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]

        if not fileobj:
            if self.mode == "a" and not os.path.exists(name):
                # Create nonexistent files in append mode.
                self.mode = "w"
                self._mode = "wb"
            fileobj = bltn_open(name, self._mode)
            self._extfileobj = False
        else:
            if name is None and hasattr(fileobj, "name"):
                name = fileobj.name
            if hasattr(fileobj, "mode"):
                self._mode = fileobj.mode
            # Caller owns the file object; close() must not close it.
            self._extfileobj = True
        self.name = os.path.abspath(name) if name else None
        self.fileobj = fileobj

        # Init attributes.
        if format is not None:
            self.format = format
        if tarinfo is not None:
            self.tarinfo = tarinfo
        if dereference is not None:
            self.dereference = dereference
        if ignore_zeros is not None:
            self.ignore_zeros = ignore_zeros
        if encoding is not None:
            self.encoding = encoding
        self.errors = errors

        # pax_headers are only meaningful for PAX_FORMAT archives.
        if pax_headers is not None and self.format == PAX_FORMAT:
            self.pax_headers = pax_headers
        else:
            self.pax_headers = {}

        if debug is not None:
            self.debug = debug
        if errorlevel is not None:
            self.errorlevel = errorlevel

        # Init datastructures.
        self.closed = False
        self.members = []       # list of members as TarInfo objects
        self._loaded = False    # flag if all members have been read
        self.offset = self.fileobj.tell()
                                # current position in the archive file
        self.inodes = {}        # dictionary caching the inodes of
                                # archive members already added

        try:
            if self.mode == "r":
                self.firstmember = None
                self.firstmember = self.next()

            if self.mode == "a":
                # Move to the end of the archive,
                # before the first empty block.
                while True:
                    self.fileobj.seek(self.offset)
                    try:
                        tarinfo = self.tarinfo.fromtarfile(self)
                        self.members.append(tarinfo)
                    except EOFHeaderError:
                        # End of archive: rewind to before the empty block
                        # so new members overwrite it.
                        self.fileobj.seek(self.offset)
                        break
                    except HeaderError as e:
                        raise ReadError(str(e))

            if self.mode in "aw":
                self._loaded = True

                if self.pax_headers:
                    buf = self.tarinfo.create_pax_global_header(
                        self.pax_headers.copy())
                    self.fileobj.write(buf)
                    self.offset += len(buf)
        except:
            # Constructor failed: release our own file object (but never a
            # caller-supplied one) before re-raising.
            if not self._extfileobj:
                self.fileobj.close()
            self.closed = True
            raise

    #--------------------------------------------------------------------------
    # Below are the classmethods which act as alternate constructors to the
    # TarFile class. The open() method is the only one that is needed for
    # public use; it is the "super"-constructor and is able to select an
    # adequate "sub"-constructor for a particular compression using the mapping
    # from OPEN_METH.
    #
    # This concept allows one to subclass TarFile without losing the comfort of
    # the super-constructor. A sub-constructor is registered and made available
    # by adding it to the mapping in OPEN_METH.

    @classmethod
    def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE,
             **kwargs):
        """Open a tar archive for reading, writing or appending. Return
           an appropriate TarFile class.

           mode:
           'r' or 'r:*' open for reading with transparent compression
           'r:'         open for reading exclusively uncompressed
           'r:gz'       open for reading with gzip compression
           'r:bz2'      open for reading with bzip2 compression
           'a' or 'a:'  open for appending, creating the file if necessary
           'w' or 'w:'  open for writing without compression
           'w:gz'       open for writing with gzip compression
           'w:bz2'      open for writing with bzip2 compression

           'r|*'        open a stream of tar blocks with transparent compression
           'r|'         open an uncompressed stream of tar blocks for reading
           'r|gz'       open a gzip compressed stream of tar blocks
           'r|bz2'      open a bzip2 compressed stream of tar blocks
           'w|'         open an uncompressed stream for writing
           'w|gz'       open a gzip compressed stream for writing
           'w|bz2'      open a bzip2 compressed stream for writing
        """

        if not name and not fileobj:
            raise ValueError("nothing to open")

        if mode in ("r", "r:*"):
            # Find out which *open() is appropriate for opening the file.
            # Try each registered opener in turn, rewinding the file object
            # between failed attempts.
            for comptype in cls.OPEN_METH:
                func = getattr(cls, cls.OPEN_METH[comptype])
                if fileobj is not None:
                    saved_pos = fileobj.tell()
                try:
                    return func(name, "r", fileobj, **kwargs)
                except (ReadError, CompressionError) as e:
                    if fileobj is not None:
                        fileobj.seek(saved_pos)
                    continue
            raise ReadError("file could not be opened successfully")

        elif ":" in mode:
            filemode, comptype = mode.split(":", 1)
            filemode = filemode or "r"
            comptype = comptype or "tar"

            # Select the *open() function according to
            # given compression.
            if comptype in cls.OPEN_METH:
                func = getattr(cls, cls.OPEN_METH[comptype])
            else:
                raise CompressionError("unknown compression type %r" % comptype)
            return func(name, filemode, fileobj, **kwargs)

        elif "|" in mode:
            # Stream mode: wrap the target in a _Stream that handles
            # sequential (de)compression.
            filemode, comptype = mode.split("|", 1)
            filemode = filemode or "r"
            comptype = comptype or "tar"

            if filemode not in "rw":
                raise ValueError("mode must be 'r' or 'w'")

            stream = _Stream(name, filemode, comptype, fileobj, bufsize)
            try:
                t = cls(name, filemode, stream, **kwargs)
            except:
                stream.close()
                raise
            # TarFile must close the stream it does not own externally.
            t._extfileobj = False
            return t

        elif mode in "aw":
            return cls.taropen(name, mode, fileobj, **kwargs)

        raise ValueError("undiscernible mode")

    @classmethod
    def taropen(cls, name, mode="r", fileobj=None, **kwargs):
        """Open uncompressed tar archive name for reading or writing.
        """
        if len(mode) > 1 or mode not in "raw":
            raise ValueError("mode must be 'r', 'a' or 'w'")
        return cls(name, mode, fileobj, **kwargs)

    @classmethod
    def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
        """Open gzip compressed tar archive name for reading or writing.
           Appending is not allowed.
        """
        if len(mode) > 1 or mode not in "rw":
            raise ValueError("mode must be 'r' or 'w'")

        try:
            import gzip
            gzip.GzipFile
        except (ImportError, AttributeError):
            raise CompressionError("gzip module is not available")

        extfileobj = fileobj is not None
        try:
            fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
            t = cls.taropen(name, mode, fileobj, **kwargs)
        except IOError:
            # Close the GzipFile we created ourselves, then report a
            # read failure (unless GzipFile creation itself raised).
            if not extfileobj and fileobj is not None:
                fileobj.close()
            if fileobj is None:
                raise
            raise ReadError("not a gzip file")
        except:
            if not extfileobj and fileobj is not None:
                fileobj.close()
            raise
        t._extfileobj = extfileobj
        return t

    @classmethod
    def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
        """Open bzip2 compressed tar archive name for reading or writing.
           Appending is not allowed.
        """
        if len(mode) > 1 or mode not in "rw":
            raise ValueError("mode must be 'r' or 'w'.")

        try:
            import bz2
        except ImportError:
            raise CompressionError("bz2 module is not available")

        if fileobj is not None:
            # bz2 cannot wrap an arbitrary file object directly here;
            # _BZ2Proxy adapts it (defined elsewhere in this file).
            fileobj = _BZ2Proxy(fileobj, mode)
        else:
            fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)

        try:
            t = cls.taropen(name, mode, fileobj, **kwargs)
        except (IOError, EOFError):
            fileobj.close()
            raise ReadError("not a bzip2 file")
        t._extfileobj = False
        return t

    # All *open() methods are registered here.
    OPEN_METH = {
        "tar": "taropen",   # uncompressed tar
        "gz":  "gzopen",    # gzip compressed tar
        "bz2": "bz2open"    # bzip2 compressed tar
    }

    #--------------------------------------------------------------------------
    # The public methods which TarFile provides:

    def close(self):
        """Close the TarFile. In write-mode, two finishing zero blocks are
           appended to the archive.
        """
        if self.closed:
            return

        if self.mode in "aw":
            self.fileobj.write(NUL * (BLOCKSIZE * 2))
            self.offset += (BLOCKSIZE * 2)
            # fill up the end with zero-blocks
            # (like option -b20 for tar does)
            blocks, remainder = divmod(self.offset, RECORDSIZE)
            if remainder > 0:
                self.fileobj.write(NUL * (RECORDSIZE - remainder))

        if not self._extfileobj:
            self.fileobj.close()
        self.closed = True

    def getmember(self, name):
        """Return a TarInfo object for member `name'. If `name' can not be
           found in the archive, KeyError is raised. If a member occurs more
           than once in the archive, its last occurrence is assumed to be the
           most up-to-date version.
        """
        tarinfo = self._getmember(name)
        if tarinfo is None:
            raise KeyError("filename %r not found" % name)
        return tarinfo

    def getmembers(self):
        """Return the members of the archive as a list of TarInfo objects. The
           list has the same order as the members in the archive.
        """
        self._check()
        if not self._loaded:    # if we want to obtain a list of
            self._load()        # all members, we first have to
                                # scan the whole archive.
        return self.members

    def getnames(self):
        """Return the members of the archive as a list of their names. It has
           the same order as the list returned by getmembers().
        """
        return [tarinfo.name for tarinfo in self.getmembers()]

    def gettarinfo(self, name=None, arcname=None, fileobj=None):
        """Create a TarInfo object for either the file `name' or the file
           object `fileobj' (using os.fstat on its file descriptor). You can
           modify some of the TarInfo's attributes before you add it using
           addfile(). If given, `arcname' specifies an alternative name for the
           file in the archive.
        """
        self._check("aw")

        # When fileobj is given, replace name by
        # fileobj's real name.
        if fileobj is not None:
            name = fileobj.name

        # Building the name of the member in the archive.
        # Backward slashes are converted to forward slashes,
        # Absolute paths are turned to relative paths.
        if arcname is None:
            arcname = name
        drv, arcname = os.path.splitdrive(arcname)
        arcname = arcname.replace(os.sep, "/")
        arcname = arcname.lstrip("/")

        # Now, fill the TarInfo object with
        # information specific for the file.
        tarinfo = self.tarinfo()
        tarinfo.tarfile = self

        # Use os.stat or os.lstat, depending on platform
        # and if symlinks shall be resolved.
        if fileobj is None:
            if hasattr(os, "lstat") and not self.dereference:
                statres = os.lstat(name)
            else:
                statres = os.stat(name)
        else:
            statres = os.fstat(fileobj.fileno())
        linkname = ""

        stmd = statres.st_mode
        if stat.S_ISREG(stmd):
            inode = (statres.st_ino, statres.st_dev)
            if not self.dereference and statres.st_nlink > 1 and \
                    inode in self.inodes and arcname != self.inodes[inode]:
                # Is it a hardlink to an already
                # archived file?
                type = LNKTYPE
                linkname = self.inodes[inode]
            else:
                # The inode is added only if its valid.
                # For win32 it is always 0.
type = REGTYPE if inode[0]: self.inodes[inode] = arcname elif stat.S_ISDIR(stmd): type = DIRTYPE elif stat.S_ISFIFO(stmd): type = FIFOTYPE elif stat.S_ISLNK(stmd): type = SYMTYPE linkname = os.readlink(name) elif stat.S_ISCHR(stmd): type = CHRTYPE elif stat.S_ISBLK(stmd): type = BLKTYPE else: return None # Fill the TarInfo object with all # information we can get. tarinfo.name = arcname tarinfo.mode = stmd tarinfo.uid = statres.st_uid tarinfo.gid = statres.st_gid if type == REGTYPE: tarinfo.size = statres.st_size else: tarinfo.size = 0 tarinfo.mtime = statres.st_mtime tarinfo.type = type tarinfo.linkname = linkname if pwd: try: tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] except KeyError: pass if grp: try: tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] except KeyError: pass if type in (CHRTYPE, BLKTYPE): if hasattr(os, "major") and hasattr(os, "minor"): tarinfo.devmajor = os.major(statres.st_rdev) tarinfo.devminor = os.minor(statres.st_rdev) return tarinfo def list(self, verbose=True): """Print a table of contents to sys.stdout. If `verbose' is False, only the names of the members are printed. If it is True, an `ls -l'-like output is produced. """ self._check() for tarinfo in self: if verbose: print(filemode(tarinfo.mode), end=' ') print("%s/%s" % (tarinfo.uname or tarinfo.uid, tarinfo.gname or tarinfo.gid), end=' ') if tarinfo.ischr() or tarinfo.isblk(): print("%10s" % ("%d,%d" \ % (tarinfo.devmajor, tarinfo.devminor)), end=' ') else: print("%10d" % tarinfo.size, end=' ') print("%d-%02d-%02d %02d:%02d:%02d" \ % time.localtime(tarinfo.mtime)[:6], end=' ') print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ') if verbose: if tarinfo.issym(): print("->", tarinfo.linkname, end=' ') if tarinfo.islnk(): print("link to", tarinfo.linkname, end=' ') print() def add(self, name, arcname=None, recursive=True, exclude=None, filter=None): """Add the file `name' to the archive. `name' may be any type of file (directory, fifo, symbolic link, etc.). 
If given, `arcname' specifies an alternative name for the file in the archive. Directories are added recursively by default. This can be avoided by setting `recursive' to False. `exclude' is a function that should return True for each filename to be excluded. `filter' is a function that expects a TarInfo object argument and returns the changed TarInfo object, if it returns None the TarInfo object will be excluded from the archive. """ self._check("aw") if arcname is None: arcname = name # Exclude pathnames. if exclude is not None: import warnings warnings.warn("use the filter argument instead", DeprecationWarning, 2) if exclude(name): self._dbg(2, "tarfile: Excluded %r" % name) return # Skip if somebody tries to archive the archive... if self.name is not None and os.path.abspath(name) == self.name: self._dbg(2, "tarfile: Skipped %r" % name) return self._dbg(1, name) # Create a TarInfo object from the file. tarinfo = self.gettarinfo(name, arcname) if tarinfo is None: self._dbg(1, "tarfile: Unsupported type %r" % name) return # Change or exclude the TarInfo object. if filter is not None: tarinfo = filter(tarinfo) if tarinfo is None: self._dbg(2, "tarfile: Excluded %r" % name) return # Append the tar header and data to the archive. if tarinfo.isreg(): f = bltn_open(name, "rb") self.addfile(tarinfo, f) f.close() elif tarinfo.isdir(): self.addfile(tarinfo) if recursive: for f in os.listdir(name): self.add(os.path.join(name, f), os.path.join(arcname, f), recursive, exclude, filter=filter) else: self.addfile(tarinfo) def addfile(self, tarinfo, fileobj=None): """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is given, tarinfo.size bytes are read from it and added to the archive. You can create TarInfo objects using gettarinfo(). On Windows platforms, `fileobj' should always be opened with mode 'rb' to avoid irritation about the file size. 
""" self._check("aw") tarinfo = copy.copy(tarinfo) buf = tarinfo.tobuf(self.format, self.encoding, self.errors) self.fileobj.write(buf) self.offset += len(buf) # If there's data to follow, append it. if fileobj is not None: copyfileobj(fileobj, self.fileobj, tarinfo.size) blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) if remainder > 0: self.fileobj.write(NUL * (BLOCKSIZE - remainder)) blocks += 1 self.offset += blocks * BLOCKSIZE self.members.append(tarinfo) def extractall(self, path=".", members=None): """Extract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers(). """ directories = [] if members is None: members = self for tarinfo in members: if tarinfo.isdir(): # Extract directories with a safe mode. directories.append(tarinfo) tarinfo = copy.copy(tarinfo) tarinfo.mode = 0o700 # Do not set_attrs directories, as we will do that further down self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) # Reverse sort directories. directories.sort(key=lambda a: a.name) directories.reverse() # Set correct owner, mtime and filemode on directories. for tarinfo in directories: dirpath = os.path.join(path, tarinfo.name) try: self.chown(tarinfo, dirpath) self.utime(tarinfo, dirpath) self.chmod(tarinfo, dirpath) except ExtractError as e: if self.errorlevel > 1: raise else: self._dbg(1, "tarfile: %s" % e) def extract(self, member, path="", set_attrs=True): """Extract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a TarInfo object. You can specify a different directory using `path'. File attributes (owner, mtime, mode) are set unless `set_attrs' is False. 
""" self._check("r") if isinstance(member, str): tarinfo = self.getmember(member) else: tarinfo = member # Prepare the link target for makelink(). if tarinfo.islnk(): tarinfo._link_target = os.path.join(path, tarinfo.linkname) try: self._extract_member(tarinfo, os.path.join(path, tarinfo.name), set_attrs=set_attrs) except EnvironmentError as e: if self.errorlevel > 0: raise else: if e.filename is None: self._dbg(1, "tarfile: %s" % e.strerror) else: self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) except ExtractError as e: if self.errorlevel > 1: raise else: self._dbg(1, "tarfile: %s" % e) def extractfile(self, member): """Extract a member from the archive as a file object. `member' may be a filename or a TarInfo object. If `member' is a regular file, a file-like object is returned. If `member' is a link, a file-like object is constructed from the link's target. If `member' is none of the above, None is returned. The file-like object is read-only and provides the following methods: read(), readline(), readlines(), seek() and tell() """ self._check("r") if isinstance(member, str): tarinfo = self.getmember(member) else: tarinfo = member if tarinfo.isreg(): return self.fileobject(self, tarinfo) elif tarinfo.type not in SUPPORTED_TYPES: # If a member's type is unknown, it is treated as a # regular file. return self.fileobject(self, tarinfo) elif tarinfo.islnk() or tarinfo.issym(): if isinstance(self.fileobj, _Stream): # A small but ugly workaround for the case that someone tries # to extract a (sym)link as a file-object from a non-seekable # stream of tar blocks. raise StreamError("cannot extract (sym)link as file object") else: # A (sym)link's file object is its target's file object. return self.extractfile(self._find_link_target(tarinfo)) else: # If there's no data associated with the member (directory, chrdev, # blkdev, etc.), return None instead of a file object. 
return None def _extract_member(self, tarinfo, targetpath, set_attrs=True): """Extract the TarInfo object tarinfo to a physical file called targetpath. """ # Fetch the TarInfo object for the given name # and build the destination pathname, replacing # forward slashes to platform specific separators. targetpath = targetpath.rstrip("/") targetpath = targetpath.replace("/", os.sep) # Create all upper directories. upperdirs = os.path.dirname(targetpath) if upperdirs and not os.path.exists(upperdirs): # Create directories that are not part of the archive with # default permissions. os.makedirs(upperdirs) if tarinfo.islnk() or tarinfo.issym(): self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) else: self._dbg(1, tarinfo.name) if tarinfo.isreg(): self.makefile(tarinfo, targetpath) elif tarinfo.isdir(): self.makedir(tarinfo, targetpath) elif tarinfo.isfifo(): self.makefifo(tarinfo, targetpath) elif tarinfo.ischr() or tarinfo.isblk(): self.makedev(tarinfo, targetpath) elif tarinfo.islnk() or tarinfo.issym(): self.makelink(tarinfo, targetpath) elif tarinfo.type not in SUPPORTED_TYPES: self.makeunknown(tarinfo, targetpath) else: self.makefile(tarinfo, targetpath) if set_attrs: self.chown(tarinfo, targetpath) if not tarinfo.issym(): self.chmod(tarinfo, targetpath) self.utime(tarinfo, targetpath) #-------------------------------------------------------------------------- # Below are the different file methods. They are called via # _extract_member() when extract() is called. They can be replaced in a # subclass to implement other functionality. def makedir(self, tarinfo, targetpath): """Make a directory called targetpath. """ try: # Use a safe mode for the directory, the real mode is set # later in _extract_member(). os.mkdir(targetpath, 0o700) except EnvironmentError as e: if e.errno != errno.EEXIST: raise def makefile(self, tarinfo, targetpath): """Make a file called targetpath. 
""" source = self.fileobj source.seek(tarinfo.offset_data) target = bltn_open(targetpath, "wb") if tarinfo.sparse is not None: for offset, size in tarinfo.sparse: target.seek(offset) copyfileobj(source, target, size) else: copyfileobj(source, target, tarinfo.size) target.seek(tarinfo.size) target.truncate() target.close() def makeunknown(self, tarinfo, targetpath): """Make a file from a TarInfo object with an unknown type at targetpath. """ self.makefile(tarinfo, targetpath) self._dbg(1, "tarfile: Unknown file type %r, " \ "extracted as regular file." % tarinfo.type) def makefifo(self, tarinfo, targetpath): """Make a fifo called targetpath. """ if hasattr(os, "mkfifo"): os.mkfifo(targetpath) else: raise ExtractError("fifo not supported by system") def makedev(self, tarinfo, targetpath): """Make a character or block device called targetpath. """ if not hasattr(os, "mknod") or not hasattr(os, "makedev"): raise ExtractError("special devices not supported by system") mode = tarinfo.mode if tarinfo.isblk(): mode |= stat.S_IFBLK else: mode |= stat.S_IFCHR os.mknod(targetpath, mode, os.makedev(tarinfo.devmajor, tarinfo.devminor)) def makelink(self, tarinfo, targetpath): """Make a (symbolic) link called targetpath. If it cannot be created (platform limitation), we try to make a copy of the referenced file instead of a link. """ try: # For systems that support symbolic and hard links. if tarinfo.issym(): os.symlink(tarinfo.linkname, targetpath) else: # See extract(). 
if os.path.exists(tarinfo._link_target): os.link(tarinfo._link_target, targetpath) else: self._extract_member(self._find_link_target(tarinfo), targetpath) except symlink_exception: if tarinfo.issym(): linkpath = os.path.join(os.path.dirname(tarinfo.name), tarinfo.linkname) else: linkpath = tarinfo.linkname else: try: self._extract_member(self._find_link_target(tarinfo), targetpath) except KeyError: raise ExtractError("unable to resolve link inside archive") def chown(self, tarinfo, targetpath): """Set owner of targetpath according to tarinfo. """ if pwd and hasattr(os, "geteuid") and os.geteuid() == 0: # We have to be root to do so. try: g = grp.getgrnam(tarinfo.gname)[2] except KeyError: g = tarinfo.gid try: u = pwd.getpwnam(tarinfo.uname)[2] except KeyError: u = tarinfo.uid try: if tarinfo.issym() and hasattr(os, "lchown"): os.lchown(targetpath, u, g) else: if sys.platform != "os2emx": os.chown(targetpath, u, g) except EnvironmentError as e: raise ExtractError("could not change owner") def chmod(self, tarinfo, targetpath): """Set file permissions of targetpath according to tarinfo. """ if hasattr(os, 'chmod'): try: os.chmod(targetpath, tarinfo.mode) except EnvironmentError as e: raise ExtractError("could not change mode") def utime(self, tarinfo, targetpath): """Set modification time of targetpath according to tarinfo. """ if not hasattr(os, 'utime'): return try: os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) except EnvironmentError as e: raise ExtractError("could not change modification time") #-------------------------------------------------------------------------- def next(self): """Return the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available. """ self._check("ra") if self.firstmember is not None: m = self.firstmember self.firstmember = None return m # Read the next block. 
self.fileobj.seek(self.offset) tarinfo = None while True: try: tarinfo = self.tarinfo.fromtarfile(self) except EOFHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue except InvalidHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue elif self.offset == 0: raise ReadError(str(e)) except EmptyHeaderError: if self.offset == 0: raise ReadError("empty file") except TruncatedHeaderError as e: if self.offset == 0: raise ReadError(str(e)) except SubsequentHeaderError as e: raise ReadError(str(e)) break if tarinfo is not None: self.members.append(tarinfo) else: self._loaded = True return tarinfo #-------------------------------------------------------------------------- # Little helper methods: def _getmember(self, name, tarinfo=None, normalize=False): """Find an archive member by name from bottom to top. If tarinfo is given, it is used as the starting point. """ # Ensure that all members have been loaded. members = self.getmembers() # Limit the member search list up to tarinfo. if tarinfo is not None: members = members[:members.index(tarinfo)] if normalize: name = os.path.normpath(name) for member in reversed(members): if normalize: member_name = os.path.normpath(member.name) else: member_name = member.name if name == member_name: return member def _load(self): """Read through the entire archive file and look for readable members. """ while True: tarinfo = self.next() if tarinfo is None: break self._loaded = True def _check(self, mode=None): """Check if TarFile is still open, and if the operation's mode corresponds to TarFile's mode. """ if self.closed: raise IOError("%s is closed" % self.__class__.__name__) if mode is not None and self.mode not in mode: raise IOError("bad operation for mode %r" % self.mode) def _find_link_target(self, tarinfo): """Find the target member of a symlink or hardlink member in the archive. 
""" if tarinfo.issym(): # Always search the entire archive. linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname limit = None else: # Search the archive before the link, because a hard link is # just a reference to an already archived file. linkname = tarinfo.linkname limit = tarinfo member = self._getmember(linkname, tarinfo=limit, normalize=True) if member is None: raise KeyError("linkname %r not found" % linkname) return member def __iter__(self): """Provide an iterator object. """ if self._loaded: return iter(self.members) else: return TarIter(self) def _dbg(self, level, msg): """Write debugging output to sys.stderr. """ if level <= self.debug: print(msg, file=sys.stderr) def __enter__(self): self._check() return self def __exit__(self, type, value, traceback): if type is None: self.close() else: # An exception occurred. We must not call close() because # it would try to write end-of-archive blocks and padding. if not self._extfileobj: self.fileobj.close() self.closed = True # class TarFile class TarIter(object): """Iterator Class. for tarinfo in TarFile(...): suite... """ def __init__(self, tarfile): """Construct a TarIter object. """ self.tarfile = tarfile self.index = 0 def __iter__(self): """Return iterator object. """ return self def __next__(self): """Return the next item using TarFile's next() method. When all members have been read, set TarFile as _loaded. """ # Fix for SF #1100429: Under rare circumstances it can # happen that getmembers() is called during iteration, # which will cause TarIter to stop prematurely. 
if not self.tarfile._loaded: tarinfo = self.tarfile.next() if not tarinfo: self.tarfile._loaded = True raise StopIteration else: try: tarinfo = self.tarfile.members[self.index] except IndexError: raise StopIteration self.index += 1 return tarinfo next = __next__ # for Python 2.x #-------------------- # exported functions #-------------------- def is_tarfile(name): """Return True if name points to a tar archive that we are able to handle, else return False. """ try: t = open(name) t.close() return True except TarError: return False bltn_open = open open = TarFile.open PK.e[b=/d/ddistlib/_backport/shutil.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2012 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """Utility functions for copying and archiving files and directory trees. XXX The functions here don't copy the resource fork or other metadata on Mac. """ import os import sys import stat from os.path import abspath import fnmatch import collections import errno from . import tarfile try: import bz2 _BZ2_SUPPORTED = True except ImportError: _BZ2_SUPPORTED = False try: from pwd import getpwnam except ImportError: getpwnam = None try: from grp import getgrnam except ImportError: getgrnam = None __all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", "copytree", "move", "rmtree", "Error", "SpecialFileError", "ExecError", "make_archive", "get_archive_formats", "register_archive_format", "unregister_archive_format", "get_unpack_formats", "register_unpack_format", "unregister_unpack_format", "unpack_archive", "ignore_patterns"] class Error(EnvironmentError): pass class SpecialFileError(EnvironmentError): """Raised when trying to do a kind of operation (e.g. copying) which is not supported on a special file (e.g. 
a named pipe)""" class ExecError(EnvironmentError): """Raised when a command could not be executed""" class ReadError(EnvironmentError): """Raised when an archive cannot be read""" class RegistryError(Exception): """Raised when a registry operation with the archiving and unpacking registries fails""" try: WindowsError except NameError: WindowsError = None def copyfileobj(fsrc, fdst, length=16*1024): """copy data from file-like object fsrc to file-like object fdst""" while 1: buf = fsrc.read(length) if not buf: break fdst.write(buf) def _samefile(src, dst): # Macintosh, Unix. if hasattr(os.path, 'samefile'): try: return os.path.samefile(src, dst) except OSError: return False # All other platforms: check for same pathname. return (os.path.normcase(os.path.abspath(src)) == os.path.normcase(os.path.abspath(dst))) def copyfile(src, dst): """Copy data from src to dst""" if _samefile(src, dst): raise Error("`%s` and `%s` are the same file" % (src, dst)) for fn in [src, dst]: try: st = os.stat(fn) except OSError: # File most likely does not exist pass else: # XXX What about other special files? (sockets, devices...) if stat.S_ISFIFO(st.st_mode): raise SpecialFileError("`%s` is a named pipe" % fn) with open(src, 'rb') as fsrc: with open(dst, 'wb') as fdst: copyfileobj(fsrc, fdst) def copymode(src, dst): """Copy mode bits from src to dst""" if hasattr(os, 'chmod'): st = os.stat(src) mode = stat.S_IMODE(st.st_mode) os.chmod(dst, mode) def copystat(src, dst): """Copy all stat info (mode bits, atime, mtime, flags) from src to dst""" st = os.stat(src) mode = stat.S_IMODE(st.st_mode) if hasattr(os, 'utime'): os.utime(dst, (st.st_atime, st.st_mtime)) if hasattr(os, 'chmod'): os.chmod(dst, mode) if hasattr(os, 'chflags') and hasattr(st, 'st_flags'): try: os.chflags(dst, st.st_flags) except OSError as why: if (not hasattr(errno, 'EOPNOTSUPP') or why.errno != errno.EOPNOTSUPP): raise def copy(src, dst): """Copy data and mode bits ("cp src dst"). The destination may be a directory. 
""" if os.path.isdir(dst): dst = os.path.join(dst, os.path.basename(src)) copyfile(src, dst) copymode(src, dst) def copy2(src, dst): """Copy data and all stat info ("cp -p src dst"). The destination may be a directory. """ if os.path.isdir(dst): dst = os.path.join(dst, os.path.basename(src)) copyfile(src, dst) copystat(src, dst) def ignore_patterns(*patterns): """Function that can be used as copytree() ignore parameter. Patterns is a sequence of glob-style patterns that are used to exclude files""" def _ignore_patterns(path, names): ignored_names = [] for pattern in patterns: ignored_names.extend(fnmatch.filter(names, pattern)) return set(ignored_names) return _ignore_patterns def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, ignore_dangling_symlinks=False): """Recursively copy a directory tree. The destination directory must not already exist. If exception(s) occur, an Error is raised with a list of reasons. If the optional symlinks flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. If the file pointed by the symlink doesn't exist, an exception will be added in the list of errors raised in an Error exception at the end of the copy process. You can set the optional ignore_dangling_symlinks flag to true if you want to silence this exception. Notice that this has no effect on platforms that don't support os.symlink. The optional ignore argument is a callable. If given, it is called with the `src` parameter, which is the directory being visited by copytree(), and `names` which is the list of `src` contents, as returned by os.listdir(): callable(src, names) -> ignored_names Since copytree() is called recursively, the callable will be called once for each directory that is copied. It returns a list of names relative to the `src` directory that should not be copied. 
The optional copy_function argument is a callable that will be used to copy each file. It will be called with the source path and the destination path as arguments. By default, copy2() is used, but any function that supports the same signature (like copy()) can be used. """ names = os.listdir(src) if ignore is not None: ignored_names = ignore(src, names) else: ignored_names = set() os.makedirs(dst) errors = [] for name in names: if name in ignored_names: continue srcname = os.path.join(src, name) dstname = os.path.join(dst, name) try: if os.path.islink(srcname): linkto = os.readlink(srcname) if symlinks: os.symlink(linkto, dstname) else: # ignore dangling symlink if the flag is on if not os.path.exists(linkto) and ignore_dangling_symlinks: continue # otherwise let the copy occurs. copy2 will raise an error copy_function(srcname, dstname) elif os.path.isdir(srcname): copytree(srcname, dstname, symlinks, ignore, copy_function) else: # Will raise a SpecialFileError for unsupported file types copy_function(srcname, dstname) # catch the Error from the recursive copytree so that we can # continue with other files except Error as err: errors.extend(err.args[0]) except EnvironmentError as why: errors.append((srcname, dstname, str(why))) try: copystat(src, dst) except OSError as why: if WindowsError is not None and isinstance(why, WindowsError): # Copying file access times may fail on Windows pass else: errors.extend((src, dst, str(why))) if errors: raise Error(errors) def rmtree(path, ignore_errors=False, onerror=None): """Recursively delete a directory tree. If ignore_errors is set, errors are ignored; otherwise, if onerror is set, it is called to handle the error with arguments (func, path, exc_info) where func is os.listdir, os.remove, or os.rmdir; path is the argument to that function that caused it to fail; and exc_info is a tuple returned by sys.exc_info(). If ignore_errors is false and onerror is None, an exception is raised. 
""" if ignore_errors: def onerror(*args): pass elif onerror is None: def onerror(*args): raise try: if os.path.islink(path): # symlinks to directories are forbidden, see bug #1669 raise OSError("Cannot call rmtree on a symbolic link") except OSError: onerror(os.path.islink, path, sys.exc_info()) # can't continue even if onerror hook returns return names = [] try: names = os.listdir(path) except os.error: onerror(os.listdir, path, sys.exc_info()) for name in names: fullname = os.path.join(path, name) try: mode = os.lstat(fullname).st_mode except os.error: mode = 0 if stat.S_ISDIR(mode): rmtree(fullname, ignore_errors, onerror) else: try: os.remove(fullname) except os.error: onerror(os.remove, fullname, sys.exc_info()) try: os.rmdir(path) except os.error: onerror(os.rmdir, path, sys.exc_info()) def _basename(path): # A basename() variant which first strips the trailing slash, if present. # Thus we always get the last component of the path, even for directories. return os.path.basename(path.rstrip(os.path.sep)) def move(src, dst): """Recursively move a file or directory to another location. This is similar to the Unix "mv" command. If the destination is a directory or a symlink to a directory, the source is moved inside the directory. The destination path must not already exist. If the destination already exists but is not a directory, it may be overwritten depending on os.rename() semantics. If the destination is on our current filesystem, then rename() is used. Otherwise, src is copied to the destination and then removed. A lot more could be done here... A look at a mv.c shows a lot of the issues this implementation glosses over. """ real_dst = dst if os.path.isdir(dst): if _samefile(src, dst): # We might be on a case insensitive filesystem, # perform the rename anyway. 
os.rename(src, dst) return real_dst = os.path.join(dst, _basename(src)) if os.path.exists(real_dst): raise Error("Destination path '%s' already exists" % real_dst) try: os.rename(src, real_dst) except OSError: if os.path.isdir(src): if _destinsrc(src, dst): raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst)) copytree(src, real_dst, symlinks=True) rmtree(src) else: copy2(src, real_dst) os.unlink(src) def _destinsrc(src, dst): src = abspath(src) dst = abspath(dst) if not src.endswith(os.path.sep): src += os.path.sep if not dst.endswith(os.path.sep): dst += os.path.sep return dst.startswith(src) def _get_gid(name): """Returns a gid, given a group name.""" if getgrnam is None or name is None: return None try: result = getgrnam(name) except KeyError: result = None if result is not None: return result[2] return None def _get_uid(name): """Returns an uid, given a user name.""" if getpwnam is None or name is None: return None try: result = getpwnam(name) except KeyError: result = None if result is not None: return result[2] return None def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, owner=None, group=None, logger=None): """Create a (possibly compressed) tar file from all the files under 'base_dir'. 'compress' must be "gzip" (the default), "bzip2", or None. 'owner' and 'group' can be used to define an owner and a group for the archive that is being built. If not provided, the current owner and group will be used. The output tar file will be named 'base_name' + ".tar", possibly plus the appropriate compression extension (".gz", or ".bz2"). Returns the output filename. 
""" tar_compression = {'gzip': 'gz', None: ''} compress_ext = {'gzip': '.gz'} if _BZ2_SUPPORTED: tar_compression['bzip2'] = 'bz2' compress_ext['bzip2'] = '.bz2' # flags for compression program, each element of list will be an argument if compress is not None and compress not in compress_ext: raise ValueError("bad value for 'compress', or compression format not " "supported : {0}".format(compress)) archive_name = base_name + '.tar' + compress_ext.get(compress, '') archive_dir = os.path.dirname(archive_name) if not os.path.exists(archive_dir): if logger is not None: logger.info("creating %s", archive_dir) if not dry_run: os.makedirs(archive_dir) # creating the tarball if logger is not None: logger.info('Creating tar archive') uid = _get_uid(owner) gid = _get_gid(group) def _set_uid_gid(tarinfo): if gid is not None: tarinfo.gid = gid tarinfo.gname = group if uid is not None: tarinfo.uid = uid tarinfo.uname = owner return tarinfo if not dry_run: tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) try: tar.add(base_dir, filter=_set_uid_gid) finally: tar.close() return archive_name def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False): # XXX see if we want to keep an external call here if verbose: zipoptions = "-r" else: zipoptions = "-rq" from distutils.errors import DistutilsExecError from distutils.spawn import spawn try: spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: # XXX really should distinguish between "couldn't find # external 'zip' command" and "zip failed". raise ExecError("unable to create zip file '%s': " "could neither import the 'zipfile' module nor " "find a standalone zip utility") % zip_filename def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_name' + ".zip". 
Uses either the "zipfile" Python module (if available) or the InfoZIP "zip" utility (if installed and found on the default search path). If neither tool is available, raises ExecError. Returns the name of the output zip file. """ zip_filename = base_name + ".zip" archive_dir = os.path.dirname(base_name) if not os.path.exists(archive_dir): if logger is not None: logger.info("creating %s", archive_dir) if not dry_run: os.makedirs(archive_dir) # If zipfile module is not available, try spawning an external 'zip' # command. try: import zipfile except ImportError: zipfile = None if zipfile is None: _call_external_zip(base_dir, zip_filename, verbose, dry_run) else: if logger is not None: logger.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) if not dry_run: zip = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) for dirpath, dirnames, filenames in os.walk(base_dir): for name in filenames: path = os.path.normpath(os.path.join(dirpath, name)) if os.path.isfile(path): zip.write(path, path) if logger is not None: logger.info("adding '%s'", path) zip.close() return zip_filename _ARCHIVE_FORMATS = { 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), 'zip': (_make_zipfile, [], "ZIP file"), } if _BZ2_SUPPORTED: _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file") def get_archive_formats(): """Returns a list of supported formats for archiving and unarchiving. Each element of the returned sequence is a tuple (name, description) """ formats = [(name, registry[2]) for name, registry in _ARCHIVE_FORMATS.items()] formats.sort() return formats def register_archive_format(name, function, extra_args=None, description=''): """Registers an archive format. name is the name of the format. function is the callable that will be used to create archives. 
If provided, extra_args is a sequence of (name, value) tuples that will be passed as arguments to the callable. description can be provided to describe the format, and will be returned by the get_archive_formats() function. """ if extra_args is None: extra_args = [] if not isinstance(function, collections.Callable): raise TypeError('The %s object is not callable' % function) if not isinstance(extra_args, (tuple, list)): raise TypeError('extra_args needs to be a sequence') for element in extra_args: if not isinstance(element, (tuple, list)) or len(element) !=2: raise TypeError('extra_args elements are : (arg_name, value)') _ARCHIVE_FORMATS[name] = (function, extra_args, description) def unregister_archive_format(name): del _ARCHIVE_FORMATS[name] def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, dry_run=0, owner=None, group=None, logger=None): """Create an archive file (eg. zip or tar). 'base_name' is the name of the file to create, minus any format-specific extension; 'format' is the archive format: one of "zip", "tar", "bztar" or "gztar". 'root_dir' is a directory that will be the root directory of the archive; ie. we typically chdir into 'root_dir' before creating the archive. 'base_dir' is the directory where we start archiving from; ie. 'base_dir' will be the common prefix of all files and directories in the archive. 'root_dir' and 'base_dir' both default to the current directory. Returns the name of the archive file. 'owner' and 'group' are used when creating a tar archive. By default, uses the current owner and group. 
""" save_cwd = os.getcwd() if root_dir is not None: if logger is not None: logger.debug("changing into '%s'", root_dir) base_name = os.path.abspath(base_name) if not dry_run: os.chdir(root_dir) if base_dir is None: base_dir = os.curdir kwargs = {'dry_run': dry_run, 'logger': logger} try: format_info = _ARCHIVE_FORMATS[format] except KeyError: raise ValueError("unknown archive format '%s'" % format) func = format_info[0] for arg, val in format_info[1]: kwargs[arg] = val if format != 'zip': kwargs['owner'] = owner kwargs['group'] = group try: filename = func(base_name, base_dir, **kwargs) finally: if root_dir is not None: if logger is not None: logger.debug("changing back to '%s'", save_cwd) os.chdir(save_cwd) return filename def get_unpack_formats(): """Returns a list of supported formats for unpacking. Each element of the returned sequence is a tuple (name, extensions, description) """ formats = [(name, info[0], info[3]) for name, info in _UNPACK_FORMATS.items()] formats.sort() return formats def _check_unpack_options(extensions, function, extra_args): """Checks what gets registered as an unpacker.""" # first make sure no other unpacker is registered for this extension existing_extensions = {} for name, info in _UNPACK_FORMATS.items(): for ext in info[0]: existing_extensions[ext] = name for extension in extensions: if extension in existing_extensions: msg = '%s is already registered for "%s"' raise RegistryError(msg % (extension, existing_extensions[extension])) if not isinstance(function, collections.Callable): raise TypeError('The registered function must be a callable') def register_unpack_format(name, extensions, function, extra_args=None, description=''): """Registers an unpack format. `name` is the name of the format. `extensions` is a list of extensions corresponding to the format. `function` is the callable that will be used to unpack archives. The callable will receive archives to unpack. 
If it's unable to handle an archive, it needs to raise a ReadError exception. If provided, `extra_args` is a sequence of (name, value) tuples that will be passed as arguments to the callable. description can be provided to describe the format, and will be returned by the get_unpack_formats() function. """ if extra_args is None: extra_args = [] _check_unpack_options(extensions, function, extra_args) _UNPACK_FORMATS[name] = extensions, function, extra_args, description def unregister_unpack_format(name): """Removes the pack format from the registry.""" del _UNPACK_FORMATS[name] def _ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) if not os.path.isdir(dirname): os.makedirs(dirname) def _unpack_zipfile(filename, extract_dir): """Unpack zip `filename` to `extract_dir` """ try: import zipfile except ImportError: raise ReadError('zlib not supported, cannot unpack this archive.') if not zipfile.is_zipfile(filename): raise ReadError("%s is not a zip file" % filename) zip = zipfile.ZipFile(filename) try: for info in zip.infolist(): name = info.filename # don't extract absolute paths or ones with .. in them if name.startswith('/') or '..' 
in name: continue target = os.path.join(extract_dir, *name.split('/')) if not target: continue _ensure_directory(target) if not name.endswith('/'): # file data = zip.read(info.filename) f = open(target, 'wb') try: f.write(data) finally: f.close() del data finally: zip.close() def _unpack_tarfile(filename, extract_dir): """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` """ try: tarobj = tarfile.open(filename) except tarfile.TarError: raise ReadError( "%s is not a compressed or uncompressed tar file" % filename) try: tarobj.extractall(extract_dir) finally: tarobj.close() _UNPACK_FORMATS = { 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"), 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file") } if _BZ2_SUPPORTED: _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [], "bzip2'ed tar-file") def _find_unpack_format(filename): for name, info in _UNPACK_FORMATS.items(): for extension in info[0]: if filename.endswith(extension): return name return None def unpack_archive(filename, extract_dir=None, format=None): """Unpack an archive. `filename` is the name of the archive. `extract_dir` is the name of the target directory, where the archive is unpacked. If not provided, the current working directory is used. `format` is the archive format: one of "zip", "tar", or "gztar". Or any other registered format. If not provided, unpack_archive will use the filename extension and see if an unpacker was registered for that extension. In case none is found, a ValueError is raised. 
""" if extract_dir is None: extract_dir = os.getcwd() if format is not None: try: format_info = _UNPACK_FORMATS[format] except KeyError: raise ValueError("Unknown unpack format '{0}'".format(format)) func = format_info[1] func(filename, extract_dir, **dict(format_info[2])) else: # we need to look at the registered unpackers supported extensions format = _find_unpack_format(filename) if format is None: raise ReadError("Unknown archive format '{0}'".format(filename)) func = _UNPACK_FORMATS[format][1] kwargs = dict(_UNPACK_FORMATS[format][2]) func(filename, extract_dir, **kwargs) PK.e[;VgVgdistlib/_backport/shutil.pyonu[ abc@s"dZddlZddlZddlZddlmZddlZddlZddlZddl m Z yddl Z e Z Wnek reZ nXyddlmZWnek rdZnXyddlmZWnek rdZnXdd d d d d dddddddddddddddgZdefdYZdefdYZdefdYZd efd!YZd"efd#YZyeWnek rdZnXdWd&Zd'Z d(Z!d)Z"d*Z#d+Z$d,Z%d-Z&ede%ed.Z'edd/Z(d0Z)d1Z*d2Z+d3Z,d4Z-d5d6d6dddd7Z.eed8Z/d6d6dd9Z0ie.dXgd;fd<6e.dYgd>fd?6e.dZgd@fdA6e0gdBfdC6Z1e re.d[gd>fe1d?fe=d?dddVZ?dS(\sUtility functions for copying and archiving files and directory trees. XXX The functions here don't copy the resource fork or other metadata on Mac. iN(tabspathi(ttarfile(tgetpwnam(tgetgrnamt copyfileobjtcopyfiletcopymodetcopystattcopytcopy2tcopytreetmovetrmtreetErrortSpecialFileErrort ExecErrort make_archivetget_archive_formatstregister_archive_formattunregister_archive_formattget_unpack_formatstregister_unpack_formattunregister_unpack_formattunpack_archivetignore_patternscBseZRS((t__name__t __module__(((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR ,scBseZdZRS(s|Raised when trying to do a kind of operation (e.g. copying) which is not supported on a special file (e.g. 
a named pipe)(RRt__doc__(((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR/scBseZdZRS(s+Raised when a command could not be executed(RRR(((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR3st ReadErrorcBseZdZRS(s%Raised when an archive cannot be read(RRR(((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR6st RegistryErrorcBseZdZRS(sVRaised when a registry operation with the archiving and unpacking registries fails(RRR(((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR9siicCs1x*|j|}|sPn|j|qWdS(s=copy data from file-like object fsrc to file-like object fdstN(treadtwrite(tfsrctfdsttlengthtbuf((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRCs cCs{ttjdrAytjj||SWqAtk r=tSXntjjtjj|tjjtjj|kS(Ntsamefile(thasattrtostpathR$tOSErrortFalsetnormcaseR(tsrctdst((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyt _samefileKs c Cst||r(td||fnx`||gD]R}ytj|}Wntk raq5Xtj|jr5td|q5q5Wt|d,}t|d}t ||WdQXWdQXdS(sCopy data from src to dsts`%s` and `%s` are the same files`%s` is a named pipetrbtwbN( R-R R&tstatR(tS_ISFIFOtst_modeRtopenR(R+R,tfntstR R!((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRWs cCsGttdrCtj|}tj|j}tj||ndS(sCopy mode bits from src to dsttchmodN(R%R&R0tS_IMODER2R6(R+R,R5tmode((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRkscCstj|}tj|j}ttdrOtj||j|jfnttdrqtj||nttdrt|drytj ||j Wqt k r}tt d s|j t j krqqXndS(sCCopy all stat info (mode bits, atime, mtime, flags) from src to dsttutimeR6tchflagstst_flagst EOPNOTSUPPN(R&R0R7R2R%R9tst_atimetst_mtimeR6R:R;R(terrnoR<(R+R,R5R8twhy((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRrscCsTtjj|r6tjj|tjj|}nt||t||dS(sVCopy data and mode bits ("cp src dst"). The destination may be a directory. 
N(R&R'tisdirtjointbasenameRR(R+R,((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRs$ cCsTtjj|r6tjj|tjj|}nt||t||dS(s]Copy data and all stat info ("cp -p src dst"). The destination may be a directory. N(R&R'RARBRCRR(R+R,((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR s$ csfd}|S(sFunction that can be used as copytree() ignore parameter. Patterns is a sequence of glob-style patterns that are used to exclude filescs:g}x'D]}|jtj||q Wt|S(N(textendtfnmatchtfiltertset(R'tnamest ignored_namestpattern(tpatterns(sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyt_ignore_patternss ((RKRL((RKsH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRscCs tj|}|dk r-|||}n t}tj|g}xG|D]?} | |krhqPntjj|| } tjj|| } ytjj| rtj| } |rtj | | q6tjj |  r|rwPn|| | n8tjj | r)t | | |||n || | WqPt k r`} |j| jdqPtk r}|j| | t|fqPXqPWyt||WnMtk r}tdk rt|trq|j||t|fnX|r t |ndS(sRecursively copy a directory tree. The destination directory must not already exist. If exception(s) occur, an Error is raised with a list of reasons. If the optional symlinks flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. If the file pointed by the symlink doesn't exist, an exception will be added in the list of errors raised in an Error exception at the end of the copy process. You can set the optional ignore_dangling_symlinks flag to true if you want to silence this exception. Notice that this has no effect on platforms that don't support os.symlink. The optional ignore argument is a callable. 
If given, it is called with the `src` parameter, which is the directory being visited by copytree(), and `names` which is the list of `src` contents, as returned by os.listdir(): callable(src, names) -> ignored_names Since copytree() is called recursively, the callable will be called once for each directory that is copied. It returns a list of names relative to the `src` directory that should not be copied. The optional copy_function argument is a callable that will be used to copy each file. It will be called with the source path and the destination path as arguments. By default, copy2() is used, but any function that supports the same signature (like copy()) can be used. iN(R&tlistdirtNoneRGtmakedirsR'RBtislinktreadlinktsymlinktexistsRAR R RDtargstEnvironmentErrortappendtstrRR(t WindowsErrort isinstance(R+R,tsymlinkstignoret copy_functiontignore_dangling_symlinksRHRIterrorstnametsrcnametdstnametlinktoterrR@((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR sD$     $ cCs|rd}n|dkr*d}ny%tjj|rNtdnWn.tk r|tjj|tjdSXg}ytj|}Wn-tjk r|tj|tjnXx|D]}tjj ||}ytj |j }Wntjk rd}nXt j |r@t|||qytj|Wqtjk r|tj|tjqXqWytj|Wn-tjk r|tj|tjnXdS(sRecursively delete a directory tree. If ignore_errors is set, errors are ignored; otherwise, if onerror is set, it is called to handle the error with arguments (func, path, exc_info) where func is os.listdir, os.remove, or os.rmdir; path is the argument to that function that caused it to fail; and exc_info is a tuple returned by sys.exc_info(). If ignore_errors is false and onerror is None, an exception is raised. 
cWsdS(N((RT((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pytonerrorscWsdS(N((RT((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRdss%Cannot call rmtree on a symbolic linkNi(RNR&R'RPR(tsystexc_infoRMterrorRBtlstatR2R0tS_ISDIRR tremovetrmdir(R't ignore_errorsRdRHR_tfullnameR8((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR s>       !cCstjj|jtjjS(N(R&R'RCtrstriptsep(R'((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyt _basename'scCs|}tjj|r~t||r;tj||dStjj|t|}tjj|r~td|q~nytj||Wnt k rtjj|rt ||rtd||fnt ||dt t |qt||tj|nXdS(sRecursively move a file or directory to another location. This is similar to the Unix "mv" command. If the destination is a directory or a symlink to a directory, the source is moved inside the directory. The destination path must not already exist. If the destination already exists but is not a directory, it may be overwritten depending on os.rename() semantics. If the destination is on our current filesystem, then rename() is used. Otherwise, src is copied to the destination and then removed. A lot more could be done here... A look at a mv.c shows a lot of the issues this implementation glosses over. 
Ns$Destination path '%s' already existss.Cannot move a directory '%s' into itself '%s'.RZ(R&R'RAR-trenameRBRpRSR R(t _destinsrcR tTrueR R tunlink(R+R,treal_dst((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR ,s$   cCsut|}t|}|jtjjs@|tjj7}n|jtjjsh|tjj7}n|j|S(N(RtendswithR&R'Rot startswith(R+R,((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRrTs  cCs^tdks|dkrdSyt|}Wntk rEd}nX|dk rZ|dSdS(s"Returns a gid, given a group name.iN(RRNtKeyError(R_tresult((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyt_get_gid]s   cCs^tdks|dkrdSyt|}Wntk rEd}nX|dk rZ|dSdS(s"Returns an uid, given a user name.iN(RRNRx(R_Ry((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyt_get_uidis   tgzipics|idd6dd6}idd6} tr>d|d s                       Q1  ( =/    6     %   PK.e[/edistlib/_backport/misc.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2012 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """Backports for individual classes and functions.""" import os import sys __all__ = ['cache_from_source', 'callable', 'fsencode'] try: from imp import cache_from_source except ImportError: def cache_from_source(py_file, debug=__debug__): ext = debug and 'c' or 'o' return py_file + ext try: callable = callable except NameError: from collections import Callable def callable(obj): return isinstance(obj, Callable) try: fsencode = os.fsencode except AttributeError: def fsencode(filename): if isinstance(filename, bytes): return filename elif isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__) PK.e[o9 9 distlib/_backport/sysconfig.cfgnu[[posix_prefix] # Configuration directories. Some of these come straight out of the # configure script. They are for implementing the other variables, not to # be used directly in [resource_locations]. 
confdir = /etc datadir = /usr/share libdir = /usr/lib statedir = /var # User resource directory local = ~/.local/{distribution.name} stdlib = {base}/lib/python{py_version_short} platstdlib = {platbase}/lib/python{py_version_short} purelib = {base}/lib/python{py_version_short}/site-packages platlib = {platbase}/lib/python{py_version_short}/site-packages include = {base}/include/python{py_version_short}{abiflags} platinclude = {platbase}/include/python{py_version_short}{abiflags} data = {base} [posix_home] stdlib = {base}/lib/python platstdlib = {base}/lib/python purelib = {base}/lib/python platlib = {base}/lib/python include = {base}/include/python platinclude = {base}/include/python scripts = {base}/bin data = {base} [nt] stdlib = {base}/Lib platstdlib = {base}/Lib purelib = {base}/Lib/site-packages platlib = {base}/Lib/site-packages include = {base}/Include platinclude = {base}/Include scripts = {base}/Scripts data = {base} [os2] stdlib = {base}/Lib platstdlib = {base}/Lib purelib = {base}/Lib/site-packages platlib = {base}/Lib/site-packages include = {base}/Include platinclude = {base}/Include scripts = {base}/Scripts data = {base} [os2_home] stdlib = {userbase}/lib/python{py_version_short} platstdlib = {userbase}/lib/python{py_version_short} purelib = {userbase}/lib/python{py_version_short}/site-packages platlib = {userbase}/lib/python{py_version_short}/site-packages include = {userbase}/include/python{py_version_short} scripts = {userbase}/bin data = {userbase} [nt_user] stdlib = {userbase}/Python{py_version_nodot} platstdlib = {userbase}/Python{py_version_nodot} purelib = {userbase}/Python{py_version_nodot}/site-packages platlib = {userbase}/Python{py_version_nodot}/site-packages include = {userbase}/Python{py_version_nodot}/Include scripts = {userbase}/Scripts data = {userbase} [posix_user] stdlib = {userbase}/lib/python{py_version_short} platstdlib = {userbase}/lib/python{py_version_short} purelib = {userbase}/lib/python{py_version_short}/site-packages 
platlib = {userbase}/lib/python{py_version_short}/site-packages include = {userbase}/include/python{py_version_short} scripts = {userbase}/bin data = {userbase} [osx_framework_user] stdlib = {userbase}/lib/python platstdlib = {userbase}/lib/python purelib = {userbase}/lib/python/site-packages platlib = {userbase}/lib/python/site-packages include = {userbase}/include scripts = {userbase}/bin data = {userbase} PK.e["ND7D7distlib/_backport/tarfile.pycnu[ abc @s>ddlmZdZdZdZdZdZdZddlZddl Z ddl Z ddl Z ddl Z ddl Z ddlZddlZyddlZddlZWnek reZZnXeefZyeef7ZWnek rnXd d d d gZejd dkr3ddlZn ddlZejZdZdZ e dZ!dZ"dZ#dZ$dZ%dZ&dZ'dZ(dZ)dZ*dZ+dZ,dZ-dZ.dZ/dZ0dZ1d Z2d!Z3d"Z4d#Z5d Z6d$Z7d%Z8e7Z9e'e(e)e*e-e.e/e+e,e0e1e2f Z:e'e(e/e2fZ;e0e1e2fZ<d&d'd(d)d*d+d,d-fZ=e>d&d'd,d-fZ?ie@d.6e@d/6e@d)6eAd*6eAd+6eAd(6ZBd0ZCd1ZDd2ZEd3ZFd4ZGd5ZHd6ZId7ZJdZKd8ZLd9ZMd:ZNd;ZOd<ZPd=ZQd>ZRd%ZSd$ZTe jUd?d@fkr)dAZVn ejWZVdBZXdCZYdDZZd=e9dEZ[dFZ\edGZ]eCdHfeDdIfeEdJfeFdKfeGdLfeHdMffeLdNffeMdOffeNeIBdPfeId feNd!ffeOdNffePdOffeQeJBdPfeJd feQd!ffeRdNffeSdOffeTeKBdQfeKdRfeTd!fff Z^dSZ_d e`fdTYZadUeafdVYZbdWeafdXYZcdYeafdZYZdd[eafd\YZed]eafd^YZfd_effd`YZgdaeffdbYZhdceffddYZideeffdfYZjdgeffdhYZkdielfdjYZmdkelfdlYZndmelfdnYZodoelfdpYZpdqelfdrYZqdselfdtYZrd elfduYZsd elfdvYZtdwelfdxYZudyZveZwetjZdS(zi(tprint_functions $Revision$s0.9.0s&Lars Gust\u00e4bel (lars@gustaebel.de)s5$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $s?$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $s8Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend.NtTarFiletTarInfot is_tarfiletTarErroriisiisustar sustar00idit0t1t2t3t4t5t6t7tLtKtStxtgtXiitpathtlinkpathtsizetmtimetuidtgidtunametgnametatimetctimeiii`i@i iiiiii@i iiitnttcesutf-8cCs,|j||}|| |t|tS(s8Convert a string to a null-terminated bytes object. (tencodetlentNUL(tstlengthtencodingterrors((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytstnscCs8|jd}|dkr(|| }n|j||S(s8Convert a null-terminated bytes object to a string. 
si(tfindtdecode(R"R$R%tp((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytntss  cCs|dtdkr^y%tt|ddp1dd}Wqtk rZtdqXnId}x@tt|dD](}|dK}|t||d7}q{W|S( s/Convert a number field to a python number. iitasciitstrictRisinvalid headeri(tchrtintR*t ValueErrortInvalidHeaderErrortrangeR tord(R"tnti((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytntis%  cCsd|kod|dknrHd|d|fjdt}n|tksh|d|dkrwtdn|dkrtjdtjd |d}nt}x6t|dD]$}|j d|d @|dL}qW|j dd |S( s/Convert a python number to a number field. iiis%0*oR+isoverflow in number fieldR tlii( RR!t GNU_FORMATR/tstructtunpacktpackt bytearrayR1tinsert(R3tdigitstformatR"R4((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytitns $$  % cCsxdttjd|d tjd|dd!}dttjd|d tjd|dd!}||fS( sCalculate the checksum for a member's header by summing up all characters except for the chksum field which is treated as if it was filled with spaces. According to the GNU tar sources, some tars (Sun and NeXT) calculate chksum with signed char, which will be different if there are chars in the buffer with the high bit set. So we calculate two checksums, unsigned and signed. it148Bit356Biit148bt356b(tsumR8R9(tbuftunsigned_chksumt signed_chksum((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt calc_chksumss 77cCs|dkrdS|dkrSx0trN|jd}|s>Pn|j|qWdSd}t||\}}xQt|D]C}|j|}t||krtdn|j|q{W|dkr|j|}t||krtdn|j|ndS(sjCopy length bytes from fileobj src to fileobj dst. If length is None, copy the entire content. iNiisend of file reachedi@i@(tNonetTruetreadtwritetdivmodR1R tIOError(tsrctdstR#REtBUFSIZEtblockst remaindertb((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt copyfileobjs,    R6t-RTtdtcR)trtwR"tttTcCsig}xStD]K}xB|D]-\}}||@|kr|j|PqqW|jdq Wdj|S(scConvert a file's mode to a string of the form -rwxrwxrwx. 
Used by TarFile.list() RVt(tfilemode_tabletappendtjoin(tmodetpermttabletbittchar((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytfilemode8s  cBseZdZRS(sBase exception.(t__name__t __module__t__doc__(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRGst ExtractErrorcBseZdZRS(s%General exception for extract errors.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRjJst ReadErrorcBseZdZRS(s&Exception for unreadable tar archives.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRkMstCompressionErrorcBseZdZRS(s.Exception for unavailable compression methods.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRlPst StreamErrorcBseZdZRS(s=Exception for unsupported operations on stream-like TarFiles.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRmSst HeaderErrorcBseZdZRS(s!Base exception for header errors.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRnVstEmptyHeaderErrorcBseZdZRS(sException for empty headers.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRoYstTruncatedHeaderErrorcBseZdZRS(s Exception for truncated headers.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRp\stEOFHeaderErrorcBseZdZRS(s"Exception for end of file headers.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRq_sR0cBseZdZRS(sException for invalid headers.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR0bstSubsequentHeaderErrorcBseZdZRS(s3Exception for missing and invalid extended headers.(RgRhRi(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRrest _LowLevelFilecBs2eZdZdZdZdZdZRS(sLow-level file object. Supports reading and writing. 
It is used instead of a regular file object for streaming access. cCsgitjd6tjtjBtjBd6|}ttdrK|tjO}ntj||d|_dS(NRYRZtO_BINARYi( tostO_RDONLYtO_WRONLYtO_CREATtO_TRUNCthasattrRttopentfd(tselftnameRa((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt__init__rs cCstj|jdS(N(RutcloseR|(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR{scCstj|j|S(N(RuRKR|(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRK~scCstj|j|dS(N(RuRLR|(R}R"((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRLs(RgRhRiRRRKRL(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRsls   t_StreamcBseZdZdZdZdZdZdZdZdZ dZ d d Z dd Z d Zd ZRS(sClass that serves as an adapter between TarFile and a stream-like object. The stream-like object only needs to have a read() or write() method and is accessed blockwise. Use of gzip or bzip2 compression is possible. A stream-like object could be for example: sys.stdin, sys.stdout, a socket, a tape device etc. _Stream is intended to be used only internally. cCst|_|dkr0t||}t|_n|dkrWt|}|j}n|p`d|_||_||_ ||_ ||_ d|_ d|_ t|_y|dkr%yddl}Wntk rtdnX||_|jd|_|dkr|jq%|jn|d kryddl}Wntk r`td nX|dkrd|_|j|_q|j|_nWn,|js|j jnt|_nXdS( s$Construct a _Stream object. t*R]itgziNszlib module is not availableRYtbz2sbz2 module is not available(RJt _extfileobjRIRstFalset _StreamProxyt getcomptypeR~RatcomptypetfileobjtbufsizeREtpostclosedtzlibt ImportErrorRltcrc32tcrct _init_read_gzt_init_write_gzRtdbuftBZ2Decompressortcmpt BZ2CompressorR(R}R~RaRRRRR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRsP                        cCs*t|dr&|j r&|jndS(NR(RzRR(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt__del__scCs|jjd|jj|jj |jjd|_tjdtt j }|j d|d|j j dr|j d |_ n|j |j j dd td S( s6Initialize for writing with gzip compression. 
i isZ2RS(@sInformational class which holds the details about an archive member given by a tar header block. TarInfo objects are returned by TarFile.getmember(), TarFile.getmembers() and TarFile.gettarinfo() and are usually created internally. R~RaRRRRtchksumttypetlinknameRRtdevmajortdevminorRRt pax_headersRRt_sparse_structst _link_targetR]cCs||_d|_d|_d|_d|_d|_d|_t|_d|_ d|_ d|_ d|_ d|_ d|_d|_d|_i|_dS(sXConstruct a TarInfo object. name is the optional name of the member. iiR]N(R~RaRRRRRtREGTYPERRRRRRRRRIRR(R}R~((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRs"                cCs|jS(N(R~(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt_getpathscCs ||_dS(N(R~(R}R~((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt_setpathscCs|jS(N(R(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt _getlinkpathscCs ||_dS(N(R(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt _setlinkpathscCs d|jj|jt|fS(Ns<%s %r at %#x>(t __class__RgR~tid(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt__repr__scCsi |jd6|jd@d6|jd6|jd6|jd6|jd6|jd6|jd 6|jd 6|j d 6|j d 6|j d 6|j d6}|d t kr|djd r|dcd7R$R%R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyttobufs    cCst|dny||jd d Wn"tk r||||nXt|||kr>||||q>WxddddfD]\}}||krd||R$R%tpartsRER((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRYs&$#cCs@tt|t\}}|dkr<|t|t7}n|S(sdReturn the string payload filled with zero bytes up to the next 512 byte border. i(RMR RR!(tpayloadRRRS((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt_create_payloadus cCsm|j||t}i}d|d<||d|j|S|jtttfkrc|j |S|j |SdS(sYChoose the right processing method depending on the type and call it. 
N( RRRt _proc_gnulongRt _proc_sparseRRtSOLARIS_XHDTYPEt _proc_paxt _proc_builtin(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR%s   cCsx|jj|_|j}|js6|jtkrO||j|j7}n||_|j |j |j |j |S(sfProcess a builtin type or an unknown type which will be treated as a regular file. ( RRRtisregRtSUPPORTED_TYPESt_blockRRt_apply_pax_infoRR$R%(R}RR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR+$s  cCs|jj|j|j}y|j|}Wntk rPtdnX|j|_|jt krt ||j |j |_ n-|jtkrt ||j |j |_n|S(sSProcess the blocks that hold a GNU longname or longlink member. s missing or bad subsequent header(RRKR.RR&RnRrRRRR*R$R%R~RR(R}RREtnext((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR'5s  c Cs|j\}}}|`x|r|jjt}d}xtdD]}}y6t|||d!}t||d|d!} Wntk rPnX|r| r|j|| fn|d7}qFWt|d}qW||_ |jj |_ |j |j |j |_||_ |S(s8Process a GNU sparse header plus extra headers. iii ii(RRRKRR1R5R/R_RRRRR.RR( R}RR R"R#RERR4RR!((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR(Ks(     cCs|jj|j|j}|jtkr9|j}n|jj}tj d|}|dk r|j dj d|ds  cCsx|jD]\}}|dkr8t|d|q |dkr]t|dt|q |dkrt|dt|q |tkr |tkryt||}Wqtk rd}qXn|dkr|jd}nt|||q q W|j|_dS( soReplace fields with supplemental information from a previous pax extended or global header. sGNU.sparse.nameRsGNU.sparse.sizeRsGNU.sparse.realsizeiRN( RtsetattrR.t PAX_FIELDStPAX_NUMBER_FIELDSR/RRR(R}RR$R%RR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR/s"        cCs9y|j|dSWntk r4|j||SXdS(s1Decode a single field from a pax record. R,N(R(tUnicodeDecodeError(R}RR$tfallback_encodingtfallback_errors((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR:s cCs0t|t\}}|r(|d7}n|tS(s_Round up a byte count by BLOCKSIZE and return it, e.g. _block(834) => 1024. i(RMR(R}RRRRS((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR. 
s cCs |jtkS(N(Rt REGULAR_TYPES(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR,scCs |jS(N(R,(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytisfilescCs |jtkS(N(RR(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRscCs |jtkS(N(RtSYMTYPE(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytissymscCs |jtkS(N(RtLNKTYPE(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytislnkscCs |jtkS(N(RtCHRTYPE(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytischr scCs |jtkS(N(RtBLKTYPE(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytisblk"scCs |jtkS(N(RtFIFOTYPE(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytisfifo$scCs |jdk S(N(RRI(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytissparse&scCs|jtttfkS(N(RRTRVRX(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytisdev(s(R~RaRRRRRRRRRRRRRRRRRR(3RgRhRit __slots__RRRtpropertyRRRRRRtDEFAULT_FORMATtENCODINGRRRRt classmethodR Rt staticmethodRRRRR$R&R%R+R'R(R*R=R<R>R/R:R.R,RORRQRSRURWRYRZR[(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRs`         1  3?    f             c Bs-eZdZdZeZeZdZeZ e Z d1Z eZeZd1dd1d1d1d1d1d1dd1d1d1d Zed1dd1edZedd1dZedd1dd Zedd1dd Zid d 6d d6dd6ZdZdZdZdZd1d1d1dZedZ d1ed1d1dZ!d1dZ"dd1dZ#dedZ$dZ%edZ&dZ'd Z(d!Z)d"Z*d#Z+d$Z,d%Z-d&Z.d'Z/d(Z0d1ed)Z1d*Z2d1d+Z3d,Z4d-Z5d.Z6d/Z7d0Z8RS(2s=The TarFile Class provides an interface to tar archives. 
iiRYRc Cst|dks|dkr-tdn||_idd6dd6dd 6||_|s|jdkrtjj| rd |_d|_nt||j}t|_ nN|d krt |d r|j }nt |d r|j|_nt |_ |rtjj|nd |_ ||_|d k rC||_n|d k r[||_n|d k rs||_n|d k r||_n|d k r||_n| |_| d k r|jtkr| |_n i|_| d k r| |_n| d k r | |_nt|_g|_t|_|jj|_i|_y9|jdkrod |_ |j!|_ n|jdkrxt r|jj"|jy&|jj#|}|jj$|Wqt%k r|jj"|jPqt&k r } t't(| qXqWn|jd krzt |_|jrz|jj)|jj*}|jj+||jt|7_qznWn,|j s|jj,nt |_nXd S(sOpen an (uncompressed) tar archive `name'. `mode' is either 'r' to read from an existing archive, 'a' to append data to an existing file or 'w' to create a new file overwriting an existing one. `mode' defaults to 'r'. If `fileobj' is given, it is used for reading or writing data. If it can be determined, `mode' is overridden by `fileobj's mode. `fileobj' is not closed, when TarFile is closed. iRsmode must be 'r', 'a' or 'w'trbRYsr+btatwbRZR~RatawN(-R R/Rat_modeRuRtexistst bltn_openRRRIRzR~RJtabspathRR>Rt dereferencet ignore_zerosR$R%RRtdebugt errorlevelRtmemberst_loadedRRtinodest firstmemberR0RR&R_RqRnRkRR RRLR(R}R~RaRR>RRjRkR$R%RRlRmteRE((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRFs  ""     !                             c Ks4| r| rtdn|dkrx|jD]}t||j|}|dk rj|j}ny||d||SWq3ttfk r} |dk r3|j|q3q3q3Xq3WtdnUd|krV|jdd\} }| pd} |pd}||jkr3t||j|}ntd|||| ||Sd |kr|jd d\} }| pd} |pd}| d krtd nt || |||} y||| | |} Wn| j nXt | _ | S|d kr$|j ||||Std dS(s|Open a tar archive for reading, writing or appending. Return an appropriate TarFile class. 
mode: 'r' or 'r:*' open for reading with transparent compression 'r:' open for reading exclusively uncompressed 'r:gz' open for reading with gzip compression 'r:bz2' open for reading with bzip2 compression 'a' or 'a:' open for appending, creating the file if necessary 'w' or 'w:' open for writing without compression 'w:gz' open for writing with gzip compression 'w:bz2' open for writing with bzip2 compression 'r|*' open a stream of tar blocks with transparent compression 'r|' open an uncompressed stream of tar blocks for reading 'r|gz' open a gzip compressed stream of tar blocks 'r|bz2' open a bzip2 compressed stream of tar blocks 'w|' open an uncompressed stream for writing 'w|gz' open a gzip compressed stream for writing 'w|bz2' open a bzip2 compressed stream for writing snothing to openRYsr:*s%file could not be opened successfullyt:iRsunknown compression type %rt|trwsmode must be 'r' or 'w'Resundiscernible modeN(RYsr:*(R/t OPEN_METHRRIRRkRlRRERRRRttaropen( R R~RaRRtkwargsRtfunct saved_posRrRftstreamR[((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR{sN              cKs@t|dks|dkr-tdn|||||S(sCOpen uncompressed tar archive name for reading or writing. iRsmode must be 'r', 'a' or 'w'(R R/(R R~RaRRx((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRwsi c Ks6t|dks|dkr-tdnyddl}|jWn#ttfk ritdnX|dk }y8|j||d||}|j||||}Wnxt k r| r|dk r|j n|dkrnt dn*| r"|dk r"|j nnX||_ |S( skOpen gzip compressed tar archive name for reading or writing. Appending is not allowed. iRusmode must be 'r' or 'w'iNsgzip module is not availableRTsnot a gzip file( R R/tgziptGzipFileRtAttributeErrorRlRIRwRNRRkR( R R~RaRt compresslevelRxR|t extfileobjR[((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytgzopens.        cKst|dks|dkr-tdnyddl}Wntk r\tdnX|dk r{t||}n|j||d|}y|j||||}Wn-t t fk r|j t dnXt |_|S( slOpen bzip2 compressed tar archive name for reading or writing. Appending is not allowed. 
iRusmode must be 'r' or 'w'.iNsbz2 module is not availableRsnot a bzip2 file(R R/RRRlRIRtBZ2FileRwRNtEOFErrorRRkRR(R R~RaRRRxRR[((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytbz2open$s     RwRRRRRcCs|jr dS|jdkr|jjttd|jtd7_t|jt\}}|dkr|jjtt|qn|j s|jj nt |_dS(slClose the TarFile. In write-mode, two finishing zero blocks are appended to the archive. NReii( RRaRRLR!RRRMt RECORDSIZERRRJ(R}RRRS((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRHs   cCs2|j|}|dkr.td|n|S(sReturn a TarInfo object for member `name'. If `name' can not be found in the archive, KeyError is raised. If a member occurs more than once in the archive, its last occurrence is assumed to be the most up-to-date version. sfilename %r not foundN(t _getmemberRItKeyError(R}R~R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt getmember\s cCs'|j|js |jn|jS(sReturn the members of the archive as a list of TarInfo objects. The list has the same order as the members in the archive. (t_checkRot_loadRn(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt getmembersgs   cCs g|jD]}|j^q S(sReturn the members of the archive as a list of their names. It has the same order as the list returned by getmembers(). (RR~(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytgetnamesqsc Cs\|jd|d k r%|j}n|d kr:|}ntjj|\}}|jtjd}|jd}|j }||_ |d krt tdr|j rtj |}qtj|}ntj|j}d}|j}tj|r|j|jf} |j rj|jdkrj| |jkrj||j| krjt} |j| }qt} | dr||j| slink toN(RtprintRfRaRRRRRURWRRRRt localtimeRR~RRQRRS(R}tverboseR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRAs&   !)  c Cs|jd|dkr"|}n|dk rtddl}|jdtd||rt|jdd|dSn|jdk rtjj ||jkr|jdd|dS|jd||j ||}|dkr|jdd |dS|dk r;||}|dkr;|jdd|dSn|j rst |d }|j |||jn|jr|j ||rxTtj|D]@}|jtjj||tjj||||d |qWqn |j |dS( s~Add the file `name' to the archive. `name' may be any type of file (directory, fifo, symbolic link, etc.). 
If given, `arcname' specifies an alternative name for the file in the archive. Directories are added recursively by default. This can be avoided by setting `recursive' to False. `exclude' is a function that should return True for each filename to be excluded. `filter' is a function that expects a TarInfo object argument and returns the changed TarInfo object, if it returns None the TarInfo object will be excluded from the archive. ReiNsuse the filter argument insteadistarfile: Excluded %rstarfile: Skipped %ristarfile: Unsupported type %rRbtfilter(RRItwarningstwarntDeprecationWarningt_dbgR~RuRRiRR,RhtaddfileRRtlistdirtaddR`( R}R~Rt recursivetexcludeRRRtf((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRsD        *        *cCs|jdtj|}|j|j|j|j}|jj||jt |7_|dk rt ||j|j t |j t\}}|dkr|jjtt||d7}n|j|t7_n|jj|dS(s]Add the TarInfo object `tarinfo' to the archive. If `fileobj' is given, tarinfo.size bytes are read from it and added to the archive. You can create TarInfo objects using gettarinfo(). On Windows platforms, `fileobj' should always be opened with mode 'rb' to avoid irritation about the file size. ReiiN(RRRR>R$R%RRLRR RIRURRMRR!RnR_(R}RRRERRRS((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR4s    t.cCs:g}|dkr|}nx_|D]W}|jr\|j|tj|}d|_n|j||d|j q"W|jdd|jx|D]}tj j ||j }y4|j |||j |||j||Wqtk r1}|jdkrq2|jdd|qXqWdS(sMExtract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers(). it set_attrstkeycSs|jS(N(R~(Rc((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pytdR]is tarfile: %sN(RIRR_RRatextracttsorttreverseRuRR`R~tchowntutimetchmodRjRmR(R}RRnt directoriesRtdirpathRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt extractallNs*      !  
R]cCs=|jdt|tr.|j|}n|}|jr^tjj||j|_ ny,|j |tjj||j d|Wnt k r}|j dkrq9|jdkr|jdd|jq9|jdd|j|jfn<tk r8}|j dkr!q9|jdd|nXdS(sxExtract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a TarInfo object. You can specify a different directory using `path'. File attributes (owner, mtime, mode) are set unless `set_attrs' is False. RYRiis tarfile: %sstarfile: %s %rN(RRRRRSRuRR`RRt_extract_memberR~tEnvironmentErrorRmtfilenameRIRtstrerrorRj(R}tmemberRRRRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRts&  ! #cCs|jdt|tr.|j|}n|}|jrP|j||S|jtkro|j||S|js|j rt|j t rt dq|j |j|SndSdS(sExtract a member from the archive as a file object. `member' may be a filename or a TarInfo object. If `member' is a regular file, a file-like object is returned. If `member' is a link, a file-like object is constructed from the link's target. If `member' is none of the above, None is returned. The file-like object is read-only and provides the following methods: read(), readline(), readlines(), seek() and tell() RYs'cannot extract (sym)link as file objectN(RRRRR,t fileobjectRR-RSRQRRRmt extractfilet_find_link_targetRI(R}RR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRs  cCs|jd}|jdtj}tjj|}|r_tjj| r_tj|n|jsw|j r|j dd|j |j fn|j d|j |j r|j||n|jr|j||n|jr |j||n|js"|jr5|j||n]|jsM|j r`|j||n2|jtkr|j||n|j|||r|j|||j s|j|||j||qndS(s\Extract the TarInfo object tarinfo to a physical file called targetpath. Ris%s -> %sN(RRRuRRtdirnameRgtmakedirsRSRQRR~RR,tmakefileRtmakedirRYtmakefifoRURWtmakedevtmakelinkRR-t makeunknownRRR(R}Rt targetpathRt upperdirs((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRs4#    cCsFytj|dWn+tk rA}|jtjkrBqBnXdS(s,Make a directory called targetpath. 
iN(RutmkdirRterrnotEEXIST(R}RRRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRs cCs|j}|j|jt|d}|jdk rqxJ|jD])\}}|j|t|||qAWnt|||j|j|j|j|j dS(s'Make a file called targetpath. RdN( RRRRhRRIRURttruncateR(R}RRtsourcettargetRR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRs   cCs+|j|||jdd|jdS(sYMake a file from a TarInfo object with an unknown type at targetpath. is9tarfile: Unknown file type %r, extracted as regular file.N(RRR(R}RR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s cCs/ttdrtj|n tddS(s'Make a fifo called targetpath. tmkfifosfifo not supported by systemN(RzRuRRj(R}RR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR scCsttd s ttd r/tdn|j}|jrT|tjO}n |tjO}tj||tj |j |j dS(s<Make a character or block device called targetpath. tmknodRs'special devices not supported by systemN( RzRuRjRaRWRtS_IFBLKtS_IFCHRRRRR(R}RRRa((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s     cCsyj|jr%tj|j|nDtjj|jrPtj|j|n|j|j ||WnPt k r|jrtjj tjj |j |j}q|j}n>Xy|j|j ||Wntk rtdnXdS(sMake a (symbolic) link called targetpath. If it cannot be created (platform limitation), we try to make a copy of the referenced file instead of a link. s%unable to resolve link inside archiveN(RQRutsymlinkRRRgRtlinkRRtsymlink_exceptionR`RR~RRj(R}RRR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR' s"       cCstrttdrtjdkrytj|jd}Wntk r]|j}nXytj |j d}Wntk r|j }nXyZ|j rttdrtj |||n%tjdkrtj|||nWqtk r}tdqXndS(s6Set owner of targetpath according to tarinfo. tgeteuidiitlchowntos2emxscould not change ownerN(RRzRuRRtgetgrnamRRRtgetpwnamRRRQRtsystplatformRRRj(R}RRRtuRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRD s '    cCsOttdrKytj||jWqKtk rG}tdqKXndS(sASet file permissions of targetpath according to tarinfo. 
Rscould not change modeN(RzRuRRaRRj(R}RRRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRZ s cCsYttdsdSy tj||j|jfWntk rT}tdnXdS(sBSet modification time of targetpath according to tarinfo. RNs"could not change modification time(RzRuRRRRj(R}RRRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyRc s  cCs|jd|jdk r2|j}d|_|S|jj|jd}xktry|jj|}WnGt k r}|j r|j dd|j|f|jt 7_qNqnt k r+}|j r|j dd|j|f|jt 7_qNq|jdkrtt|qntk rY|jdkrtdqn[tk r}|jdkrtt|qn%tk r}tt|nXPqNW|dk r|jj|n t|_|S(sReturn the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available. trais0x%X: %sis empty fileN(RRqRIRRRRJRR&RqRkRRR0RkRRoRpRrRnR_Ro(R}tmRRr((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR0n sF          cCs|j}|dk r.||j| }n|rItjj|}nxKt|D]=}|rztjj|j}n |j}||krV|SqVWdS(s}Find an archive member by name from bottom to top. If tarinfo is given, it is used as the starting point. N(RRItindexRuRtnormpathtreversedR~(R}R~Rt normalizeRnRt member_name((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s    cCs6x&tr(|j}|dkrPqqWt|_dS(sWRead through the entire archive file and look for readable members. N(RJR0RIRo(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s    cCsW|jr"td|jjn|dk rS|j|krStd|jndS(snCheck if TarFile is still open, and if the operation's mode corresponds to TarFile's mode. s %s is closedsbad operation for mode %rN(RRNRRgRIRa(R}Ra((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s cCs|jr5tjj|jd|j}d}n|j}|}|j|d|dt}|dkr~t d|n|S(sZFind the target member of a symlink or hardlink member in the archive. RRRslinkname %r not foundN( RQRuRRR~RRIRRJR(R}RRtlimitR((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s     cCs$|jrt|jSt|SdS(s$Provide an iterator object. 
N(RotiterRntTarIter(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s  cCs)||jkr%t|dtjndS(s.Write debugging output to sys.stderr. tfileN(RlRRtstderr(R}tleveltmsg((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR scCs|j|S(N(R(R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt __enter__ s cCs?|dkr|jn"|js2|jjnt|_dS(N(RIRRRRJR(R}RRt traceback((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt__exit__ s    N(9RgRhRiRlRRjRkRmR^R>R_R$RIR%RRRRRR`RR{RwRRRvRRRRRRJRARRRRRRRRRRRRRRRR0RRRRRRRR(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR,sn  iK   b > &# & 0       1    RcBs/eZdZdZdZdZeZRS(sMIterator Class. for tarinfo in TarFile(...): suite... cCs||_d|_dS(s$Construct a TarIter object. iN(RR(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s cCs|S(s Return iterator object. ((R}((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR scCs}|jjs9|jj}|sjt|j_tqjn1y|jj|j}Wntk ritnX|jd7_|S(sReturn the next item using TarFile's next() method. When all members have been read, set TarFile as _loaded. i(RRoR0RJt StopIterationRnRt IndexError(R}R((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyt__next__ s     (RgRhRiRRRR0(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR s    cCs7yt|}|jtSWntk r2tSXdS(sfReturn True if name points to a tar archive that we are able to handle, else return False. 
N(R{RRJRR(R~R[((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyR# s    (xt __future__Rt __version__tversiont __author__t__date__t __cvsid__t __credits__RRuRRRR8RR3RRRRIR~tNotImplementedErrorRt WindowsErrort NameErrort__all__t version_infot __builtin__tbuiltinsR{t_openR!RRRRRRR RRRRRPRTRVRRXtCONTTYPERRRRRR)RR7RR^R-RNRRItsetR;RR.RJtS_IFLNKtS_IFREGRtS_IFDIRRtS_IFIFOtTSUIDtTSGIDtTSVTXtTUREADtTUWRITEtTUEXECtTGREADtTGWRITEtTGEXECtTOREADtTOWRITEtTOEXECR~R_tgetfilesystemencodingR&R*R5R?RHRUR^Rft ExceptionRRjRkRlRmRnRoRpRqR0RrtobjectRsRRRRRRRRRRh(((sI/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyts.                                                 ?K* PK.e[q@SQQdistlib/_backport/sysconfig.pyonu[ abc @s_dZddlZddlZddlZddlZddlmZmZyddlZWne k r{ddl ZnXdddddd d d d d dg Z dZ ej rejje ej Zne ejZejdkr(dedjkr(e ejjeeZnejdkrndedjkrne ejjeeeZnejdkrdedjkre ejjeeeZndZeZeadZejZejdZdZejj dZ!ejd Z"e!de!dZ#ejj$ej%Z&ejj$ej'Z(da*dZ+dZ,dZ-d Z.d!Z/d"Z0d#Z1dd$Z2d%Z3d&Z4d'Z5dd(Z6d)Z7d*Z8d+Z9e0de:d,Z;e0de:d-Z<d.Z=d/Z>d0Z?d1Z@d2ZAd3ZBeCd4kr[eBndS(5s-Access to Python's configuration information.iN(tpardirtrealpathtget_config_h_filenametget_config_vartget_config_varstget_makefile_filenametget_pathtget_path_namest get_pathst get_platformtget_python_versiontget_scheme_namestparse_config_hcCs'yt|SWntk r"|SXdS(N(RtOSError(tpath((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt_safe_realpath"s tnttpcbuildis\pc\vis\pcbuild\amd64icCs=x6dD].}tjjtjjtd|rtSqWtS(Ns Setup.dists Setup.localtModules(s Setup.dists Setup.local(tosRtisfiletjoint _PROJECT_BASEtTruetFalse(tfn((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pytis_python_build:s $cCstsddlm}tjddd}||}|jd}|j}tj|WdQXt rx7d D],}tj |d d tj |d d qvWnt andS(Ni(tfindert.iis sysconfig.cfgt posix_prefixt posix_hometincludes{srcdir}/Includet platincludes{projectbase}/.(RR( t _cfg_readt resourcesRt__name__trsplittfindt as_streamt_SCHEMEStreadfpt 
_PYTHON_BUILDtsetR(Rtbackport_packaget_findert_cfgfiletstscheme((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt_ensure_cfg_readDs  s \{([^{]*?)\}cs-t|jdr(|jd}n t}|j}xb|D]Z}|dkr\qDnx?|D]7\}}|j||rqcn|j|||qcWqDW|jdxw|jD]i}t|j|fd}x<|j|D]+\}}|j||t j ||qWqWdS(Ntglobalscs0|jd}|kr#|S|jdS(Nii(tgroup(tmatchobjtname(t variables(sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt _replaceros ( R0t has_sectiontitemsttupletsectionst has_optionR*tremove_sectiontdictt _VAR_REPLtsub(tconfigR1R:tsectiontoptiontvalueR6((R5sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt_expand_globalsYs$     iiicsfd}tj||S(sIn the string `path`, replace tokens like {some.thing} with the corresponding value from the map `local_vars`. If there is no corresponding value, leave the token unchanged. csJ|jd}|kr#|S|tjkr=tj|S|jdS(Nii(R2Rtenviron(R3R4(t local_vars(sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyR6s   (R>R?(RRFR6((RFsK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt _subst_varsscCsI|j}x6|jD](\}}||kr7qn|||R?(RCRRR6((RRsK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt format_valuescCstjdkrdStjS(NRNR(RR4(((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt_get_default_schemescCstjjdd}d}tjdkr_tjjdpBd}|rO|S||dSntjdkrtd}|r|r|S|dd |d tjd Sqn|r|S|dd SdS( NtPYTHONUSERBASEcWstjjtjj|S(N(RRRPR(targs((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pytjoinusersRtAPPDATAt~tPythontdarwintPYTHONFRAMEWORKtLibrarys%d.%dis.local( RREtgetROR4tsystplatformRt version_info(tenv_baseRYtbaset framework((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt _getuserbases"  cCstjd}tjd}tjd}|dkrBi}ni}i}tj|dddd}|j}WdQXx|D]} | jd s| jd krqn|j| } | r| j d d \} } | j} | j d d } d| kr| || dttjf}nd}tjjt d|dS(s Return the path of the Makefile.tMakefiletabiflagss 
config-%s%sR@tstdlib( R)RRRRthasattrRat_PY_VERSION_SHORTRR(tconfig_dir_name((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyRMs cCst}yt||WnLtk rh}d|}t|drY|d|j}nt|nXt}y&t|}t||WdQXWnLtk r}d|}t|dr|d|j}nt|nXtr|d|dkrd4}qI|d?krd5}qIt d6|fqL|d.krtj!d@krId0}qIqL|dAkrLtj!dBkr@d3}qId/}qLqOnd:|||fS(CsReturn a string that identifies the current platform. This is used mainly to distinguish platform-specific build directories and platform-specific built distributions. Typically includes the OS name and version and the architecture (as supplied by 'os.uname()'), although the exact information included depends on the OS; eg. for IRIX the architecture isn't particularly important (IRIX only runs on SGI hardware), but for Linux the kernel version isn't particularly important. Examples of returned values: linux-i586 linux-alpha (?) solaris-2.6-sun4u irix-5.3 irix64-6.2 Windows will return one of: win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) win-ia64 (64bit Windows on Itanium) win32 (all others - specifically, sys.platform is returned) For other non-POSIX platforms, currently just returns 'sys.platform'. Rs bit (it)tamd64s win-amd64titaniumswin-ia64RNRt/RlRt_t-itlinuxs%s-%stsunosit5tsolariss%d.%siiitirixtaixs%s-%s.%sitcygwins[\d.]+R]tMACOSX_DEPLOYMENT_TARGETs0/System/Library/CoreServices/SystemVersion.plists=ProductUserVisibleVersion\s*(.*?)NRitmacosxs10.4.s-archRntfats -arch\s+(\S+)ti386tppctx86_64tinteltfat3tppc64tfat64t universals%Don't know machine value for archs=%ri tPowerPCtPower_Macintoshs%s-%s-%s(RR(RR(RRR(RR(RRRRI(RRI("RR4RaRR%RbR~tlowerRRRzR{RrRsRyR2RR`RRuRRtreadtcloseRORRRxtfindallR9RR*R|tmaxsize(RtitjtlooktosnamethosttreleaseRtmachinetrel_reRtcfgvarstmacvert macreleaseRtcflagstarchs((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyR [s    (     + !               
cCstS(N(R(((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyR scCsZxStt|jD]9\}\}}|dkrCd|GHnd||fGHqWdS(Nis%s: s %s = "%s"(t enumerateRR8(ttitletdatatindexRLRC((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt _print_dicts+  cCsRdtGHdtGHdtGHdGHtdtdGHtdtdS( s*Display all information sysconfig detains.sPlatform: "%s"sPython version: "%s"s!Current installation scheme: "%s"tPathst VariablesN(((R R RVRRR(((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt_mains   t__main__(Dt__doc__RtRRrRatos.pathRRt configparsert ImportErrort ConfigParsert__all__RRRRRRR4RRRR)RR!R0tRawConfigParserR'RsR>RDRRRRRRQRRRRRORt _USER_BASERGRMRTRURVRgRRRRR RR RRRRRRR R RRR#(((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyts        %%!%!     #      v         PK.e[t(distlib/_backport/__init__.pycnu[ abc@s dZdS(s Modules copied from Python 3 standard libraries, for internal use only. Individual classes and functions are found in d2._backport.misc. Intended usage is to always import things missing from 3.1 from that module: the built-in/stdlib objects will be used if found. 
N(t__doc__(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/__init__.pyttPK.e[l>distlib/_backport/misc.pycnu[ abc@sdZddlZddlZdddgZyddlmZWnek r`edZnXy eZWn*e k rddl m Z d ZnXy ej Z Wne k rd Z nXdS( s/Backports for individual classes and functions.iNtcache_from_sourcetcallabletfsencode(RcCs|r dpd}||S(Ntcto((tpy_filetdebugtext((sF/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.pyRs(tCallablecCs t|tS(N(t isinstanceR(tobj((sF/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.pyRscCsRt|tr|St|tr5|jtjStdt|jdS(Nsexpect bytes or str, not %s( R tbyteststrtencodetsystgetfilesystemencodingt TypeErrorttypet__name__(tfilename((sF/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.pyR"s (t__doc__tosRt__all__timpRt ImportErrort __debug__Rt NameErrort collectionsRRtAttributeError(((sF/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.pyts         PK.e[6RKiKidistlib/_backport/sysconfig.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2012 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. 
# """Access to Python's configuration information.""" import codecs import os import re import sys from os.path import pardir, realpath try: import configparser except ImportError: import ConfigParser as configparser __all__ = [ 'get_config_h_filename', 'get_config_var', 'get_config_vars', 'get_makefile_filename', 'get_path', 'get_path_names', 'get_paths', 'get_platform', 'get_python_version', 'get_scheme_names', 'parse_config_h', ] def _safe_realpath(path): try: return realpath(path) except OSError: return path if sys.executable: _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) else: # sys.executable can be empty if argv[0] has been changed and Python is # unable to retrieve the real program name _PROJECT_BASE = _safe_realpath(os.getcwd()) if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower(): _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir)) # PC/VS7.1 if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower(): _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) # PC/AMD64 if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower(): _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) def is_python_build(): for fn in ("Setup.dist", "Setup.local"): if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): return True return False _PYTHON_BUILD = is_python_build() _cfg_read = False def _ensure_cfg_read(): global _cfg_read if not _cfg_read: from ..resources import finder backport_package = __name__.rsplit('.', 1)[0] _finder = finder(backport_package) _cfgfile = _finder.find('sysconfig.cfg') assert _cfgfile, 'sysconfig.cfg exists' with _cfgfile.as_stream() as s: _SCHEMES.readfp(s) if _PYTHON_BUILD: for scheme in ('posix_prefix', 'posix_home'): _SCHEMES.set(scheme, 'include', '{srcdir}/Include') _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.') _cfg_read = True _SCHEMES = configparser.RawConfigParser() _VAR_REPL = re.compile(r'\{([^{]*?)\}') def 
_expand_globals(config): _ensure_cfg_read() if config.has_section('globals'): globals = config.items('globals') else: globals = tuple() sections = config.sections() for section in sections: if section == 'globals': continue for option, value in globals: if config.has_option(section, option): continue config.set(section, option, value) config.remove_section('globals') # now expanding local variables defined in the cfg file # for section in config.sections(): variables = dict(config.items(section)) def _replacer(matchobj): name = matchobj.group(1) if name in variables: return variables[name] return matchobj.group(0) for option, value in config.items(section): config.set(section, option, _VAR_REPL.sub(_replacer, value)) #_expand_globals(_SCHEMES) # FIXME don't rely on sys.version here, its format is an implementation detail # of CPython, use sys.version_info or sys.hexversion _PY_VERSION = sys.version.split()[0] _PY_VERSION_SHORT = sys.version[:3] _PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2] _PREFIX = os.path.normpath(sys.prefix) _EXEC_PREFIX = os.path.normpath(sys.exec_prefix) _CONFIG_VARS = None _USER_BASE = None def _subst_vars(path, local_vars): """In the string `path`, replace tokens like {some.thing} with the corresponding value from the map `local_vars`. If there is no corresponding value, leave the token unchanged. 
""" def _replacer(matchobj): name = matchobj.group(1) if name in local_vars: return local_vars[name] elif name in os.environ: return os.environ[name] return matchobj.group(0) return _VAR_REPL.sub(_replacer, path) def _extend_dict(target_dict, other_dict): target_keys = target_dict.keys() for key, value in other_dict.items(): if key in target_keys: continue target_dict[key] = value def _expand_vars(scheme, vars): res = {} if vars is None: vars = {} _extend_dict(vars, get_config_vars()) for key, value in _SCHEMES.items(scheme): if os.name in ('posix', 'nt'): value = os.path.expanduser(value) res[key] = os.path.normpath(_subst_vars(value, vars)) return res def format_value(value, vars): def _replacer(matchobj): name = matchobj.group(1) if name in vars: return vars[name] return matchobj.group(0) return _VAR_REPL.sub(_replacer, value) def _get_default_scheme(): if os.name == 'posix': # the default scheme for posix is posix_prefix return 'posix_prefix' return os.name def _getuserbase(): env_base = os.environ.get("PYTHONUSERBASE", None) def joinuser(*args): return os.path.expanduser(os.path.join(*args)) # what about 'os2emx', 'riscos' ? if os.name == "nt": base = os.environ.get("APPDATA") or "~" if env_base: return env_base else: return joinuser(base, "Python") if sys.platform == "darwin": framework = get_config_var("PYTHONFRAMEWORK") if framework: if env_base: return env_base else: return joinuser("~", "Library", framework, "%d.%d" % sys.version_info[:2]) if env_base: return env_base else: return joinuser("~", ".local") def _parse_makefile(filename, vars=None): """Parse a Makefile-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. """ # Regexes needed for parsing Makefile (and similar syntaxes, # like old-style Setup files). 
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") if vars is None: vars = {} done = {} notdone = {} with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f: lines = f.readlines() for line in lines: if line.startswith('#') or line.strip() == '': continue m = _variable_rx.match(line) if m: n, v = m.group(1, 2) v = v.strip() # `$$' is a literal `$' in make tmpv = v.replace('$$', '') if "$" in tmpv: notdone[n] = v else: try: v = int(v) except ValueError: # insert literal `$' done[n] = v.replace('$$', '$') else: done[n] = v # do variable interpolation here variables = list(notdone.keys()) # Variables with a 'PY_' prefix in the makefile. These need to # be made available without that prefix through sysconfig. # Special care is needed to ensure that variable expansion works, even # if the expansion uses the name without a prefix. renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') while len(variables) > 0: for name in tuple(variables): value = notdone[name] m = _findvar1_rx.search(value) or _findvar2_rx.search(value) if m is not None: n = m.group(1) found = True if n in done: item = str(done[n]) elif n in notdone: # get it on a subsequent round found = False elif n in os.environ: # do it like make: fall back to environment item = os.environ[n] elif n in renamed_variables: if (name.startswith('PY_') and name[3:] in renamed_variables): item = "" elif 'PY_' + n in notdone: found = False else: item = str(done['PY_' + n]) else: done[n] = item = "" if found: after = value[m.end():] value = value[:m.start()] + item + after if "$" in after: notdone[name] = value else: try: value = int(value) except ValueError: done[name] = value.strip() else: done[name] = value variables.remove(name) if (name.startswith('PY_') and name[3:] in renamed_variables): name = name[3:] if name not in done: done[name] = value else: # bogus variable 
reference (e.g. "prefix=$/opt/python"); # just drop it since we can't deal done[name] = value variables.remove(name) # strip spurious spaces for k, v in done.items(): if isinstance(v, str): done[k] = v.strip() # save the results in the global dictionary vars.update(done) return vars def get_makefile_filename(): """Return the path of the Makefile.""" if _PYTHON_BUILD: return os.path.join(_PROJECT_BASE, "Makefile") if hasattr(sys, 'abiflags'): config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags) else: config_dir_name = 'config' return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') def _init_posix(vars): """Initialize the module as appropriate for POSIX systems.""" # load the installed Makefile: makefile = get_makefile_filename() try: _parse_makefile(makefile, vars) except IOError as e: msg = "invalid Python installation: unable to open %s" % makefile if hasattr(e, "strerror"): msg = msg + " (%s)" % e.strerror raise IOError(msg) # load the installed pyconfig.h: config_h = get_config_h_filename() try: with open(config_h) as f: parse_config_h(f, vars) except IOError as e: msg = "invalid Python installation: unable to open %s" % config_h if hasattr(e, "strerror"): msg = msg + " (%s)" % e.strerror raise IOError(msg) # On AIX, there are wrong paths to the linker scripts in the Makefile # -- these paths are relative to the Python source, but when installed # the scripts are in another directory. if _PYTHON_BUILD: vars['LDSHARED'] = vars['BLDSHARED'] def _init_non_posix(vars): """Initialize the module as appropriate for NT""" # set basic install directories vars['LIBDEST'] = get_path('stdlib') vars['BINLIBDEST'] = get_path('platstdlib') vars['INCLUDEPY'] = get_path('include') vars['SO'] = '.pyd' vars['EXE'] = '.exe' vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) # # public APIs # def parse_config_h(fp, vars=None): """Parse a config.h-style file. 
A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. """ if vars is None: vars = {} define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") while True: line = fp.readline() if not line: break m = define_rx.match(line) if m: n, v = m.group(1, 2) try: v = int(v) except ValueError: pass vars[n] = v else: m = undef_rx.match(line) if m: vars[m.group(1)] = 0 return vars def get_config_h_filename(): """Return the path of pyconfig.h.""" if _PYTHON_BUILD: if os.name == "nt": inc_dir = os.path.join(_PROJECT_BASE, "PC") else: inc_dir = _PROJECT_BASE else: inc_dir = get_path('platinclude') return os.path.join(inc_dir, 'pyconfig.h') def get_scheme_names(): """Return a tuple containing the schemes names.""" return tuple(sorted(_SCHEMES.sections())) def get_path_names(): """Return a tuple containing the paths names.""" # xxx see if we want a static list return _SCHEMES.options('posix_prefix') def get_paths(scheme=_get_default_scheme(), vars=None, expand=True): """Return a mapping containing an install scheme. ``scheme`` is the install scheme name. If not provided, it will return the default scheme for the current platform. """ _ensure_cfg_read() if expand: return _expand_vars(scheme, vars) else: return dict(_SCHEMES.items(scheme)) def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): """Return a path corresponding to the scheme. ``scheme`` is the install scheme name. """ return get_paths(scheme, vars, expand)[name] def get_config_vars(*args): """With no arguments, return a dictionary of all configuration variables relevant for the current platform. On Unix, this means every variable defined in Python's installed Makefile; On Windows and Mac OS it's a much smaller set. 
With arguments, return a list of values that result from looking up each argument in the configuration variable dictionary. """ global _CONFIG_VARS if _CONFIG_VARS is None: _CONFIG_VARS = {} # Normalized versions of prefix and exec_prefix are handy to have; # in fact, these are the standard versions used most places in the # distutils2 module. _CONFIG_VARS['prefix'] = _PREFIX _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX _CONFIG_VARS['py_version'] = _PY_VERSION _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2] _CONFIG_VARS['base'] = _PREFIX _CONFIG_VARS['platbase'] = _EXEC_PREFIX _CONFIG_VARS['projectbase'] = _PROJECT_BASE try: _CONFIG_VARS['abiflags'] = sys.abiflags except AttributeError: # sys.abiflags may not be defined on all platforms. _CONFIG_VARS['abiflags'] = '' if os.name in ('nt', 'os2'): _init_non_posix(_CONFIG_VARS) if os.name == 'posix': _init_posix(_CONFIG_VARS) # Setting 'userbase' is done below the call to the # init function to enable using 'get_config_var' in # the init-function. if sys.version >= '2.6': _CONFIG_VARS['userbase'] = _getuserbase() if 'srcdir' not in _CONFIG_VARS: _CONFIG_VARS['srcdir'] = _PROJECT_BASE else: _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir']) # Convert srcdir into an absolute path if it appears necessary. # Normally it is relative to the build directory. However, during # testing, for example, we might be running a non-installed python # from a different directory. if _PYTHON_BUILD and os.name == "posix": base = _PROJECT_BASE try: cwd = os.getcwd() except OSError: cwd = None if (not os.path.isabs(_CONFIG_VARS['srcdir']) and base != cwd): # srcdir is relative and we are not in the same directory # as the executable. Assume executable is in the build # directory and make srcdir absolute. 
srcdir = os.path.join(base, _CONFIG_VARS['srcdir']) _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir) if sys.platform == 'darwin': kernel_version = os.uname()[2] # Kernel version (8.4.3) major_version = int(kernel_version.split('.')[0]) if major_version < 8: # On macOS before 10.4, check if -arch and -isysroot # are in CFLAGS or LDFLAGS and remove them if they are. # This is needed when building extensions on a 10.3 system # using a universal build of python. for key in ('LDFLAGS', 'BASECFLAGS', # a number of derived variables. These need to be # patched up as well. 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): flags = _CONFIG_VARS[key] flags = re.sub('-arch\s+\w+\s', ' ', flags) flags = re.sub('-isysroot [^ \t]*', ' ', flags) _CONFIG_VARS[key] = flags else: # Allow the user to override the architecture flags using # an environment variable. # NOTE: This name was introduced by Apple in OSX 10.5 and # is used by several scripting languages distributed with # that OS release. if 'ARCHFLAGS' in os.environ: arch = os.environ['ARCHFLAGS'] for key in ('LDFLAGS', 'BASECFLAGS', # a number of derived variables. These need to be # patched up as well. 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): flags = _CONFIG_VARS[key] flags = re.sub('-arch\s+\w+\s', ' ', flags) flags = flags + ' ' + arch _CONFIG_VARS[key] = flags # If we're on OSX 10.5 or later and the user tries to # compiles an extension using an SDK that is not present # on the current machine it is better to not use an SDK # than to fail. # # The major usecase for this is users using a Python.org # binary installer on OSX 10.6: that installer uses # the 10.4u SDK, but that SDK is not installed by default # when you install Xcode. # CFLAGS = _CONFIG_VARS.get('CFLAGS', '') m = re.search('-isysroot\s+(\S+)', CFLAGS) if m is not None: sdk = m.group(1) if not os.path.exists(sdk): for key in ('LDFLAGS', 'BASECFLAGS', # a number of derived variables. These need to be # patched up as well. 
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): flags = _CONFIG_VARS[key] flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags) _CONFIG_VARS[key] = flags if args: vals = [] for name in args: vals.append(_CONFIG_VARS.get(name)) return vals else: return _CONFIG_VARS def get_config_var(name): """Return the value of a single variable using the dictionary returned by 'get_config_vars()'. Equivalent to get_config_vars().get(name) """ return get_config_vars().get(name) def get_platform(): """Return a string that identifies the current platform. This is used mainly to distinguish platform-specific build directories and platform-specific built distributions. Typically includes the OS name and version and the architecture (as supplied by 'os.uname()'), although the exact information included depends on the OS; eg. for IRIX the architecture isn't particularly important (IRIX only runs on SGI hardware), but for Linux the kernel version isn't particularly important. Examples of returned values: linux-i586 linux-alpha (?) solaris-2.6-sun4u irix-5.3 irix64-6.2 Windows will return one of: win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) win-ia64 (64bit Windows on Itanium) win32 (all others - specifically, sys.platform is returned) For other non-POSIX platforms, currently just returns 'sys.platform'. """ if os.name == 'nt': # sniff sys.version for architecture. prefix = " bit (" i = sys.version.find(prefix) if i == -1: return sys.platform j = sys.version.find(")", i) look = sys.version[i+len(prefix):j].lower() if look == 'amd64': return 'win-amd64' if look == 'itanium': return 'win-ia64' return sys.platform if os.name != "posix" or not hasattr(os, 'uname'): # XXX what about the architecture? NT is Intel or Alpha, # Mac OS is M68k or PPC, etc. 
return sys.platform # Try to distinguish various flavours of Unix osname, host, release, version, machine = os.uname() # Convert the OS name to lowercase, remove '/' characters # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh") osname = osname.lower().replace('/', '') machine = machine.replace(' ', '_') machine = machine.replace('/', '-') if osname[:5] == "linux": # At least on Linux/Intel, 'machine' is the processor -- # i386, etc. # XXX what about Alpha, SPARC, etc? return "%s-%s" % (osname, machine) elif osname[:5] == "sunos": if release[0] >= "5": # SunOS 5 == Solaris 2 osname = "solaris" release = "%d.%s" % (int(release[0]) - 3, release[2:]) # fall through to standard osname-release-machine representation elif osname[:4] == "irix": # could be "irix64"! return "%s-%s" % (osname, release) elif osname[:3] == "aix": return "%s-%s.%s" % (osname, version, release) elif osname[:6] == "cygwin": osname = "cygwin" rel_re = re.compile(r'[\d.]+') m = rel_re.match(release) if m: release = m.group() elif osname[:6] == "darwin": # # For our purposes, we'll assume that the system version from # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set # to. This makes the compatibility story a bit more sane because the # machine is going to compile and link as if it were # MACOSX_DEPLOYMENT_TARGET. cfgvars = get_config_vars() macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') if True: # Always calculate the release of the running machine, # needed to determine if we can build fat binaries or not. macrelease = macver # Get the system version. Reading this plist is a documented # way to get the system version (see the documentation for # the Gestalt Manager) try: f = open('/System/Library/CoreServices/SystemVersion.plist') except IOError: # We're on a plain darwin box, fall back to the default # behaviour. 
pass else: try: m = re.search(r'ProductUserVisibleVersion\s*' r'(.*?)', f.read()) finally: f.close() if m is not None: macrelease = '.'.join(m.group(1).split('.')[:2]) # else: fall back to the default behaviour if not macver: macver = macrelease if macver: release = macver osname = "macosx" if ((macrelease + '.') >= '10.4.' and '-arch' in get_config_vars().get('CFLAGS', '').strip()): # The universal build will build fat binaries, but not on # systems before 10.4 # # Try to detect 4-way universal builds, those have machine-type # 'universal' instead of 'fat'. machine = 'fat' cflags = get_config_vars().get('CFLAGS') archs = re.findall('-arch\s+(\S+)', cflags) archs = tuple(sorted(set(archs))) if len(archs) == 1: machine = archs[0] elif archs == ('i386', 'ppc'): machine = 'fat' elif archs == ('i386', 'x86_64'): machine = 'intel' elif archs == ('i386', 'ppc', 'x86_64'): machine = 'fat3' elif archs == ('ppc64', 'x86_64'): machine = 'fat64' elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): machine = 'universal' else: raise ValueError( "Don't know machine value for archs=%r" % (archs,)) elif machine == 'i386': # On OSX the machine type returned by uname is always the # 32-bit variant, even if the executable architecture is # the 64-bit variant if sys.maxsize >= 2**32: machine = 'x86_64' elif machine in ('PowerPC', 'Power_Macintosh'): # Pick a sane name for the PPC architecture. 
# See 'i386' case if sys.maxsize >= 2**32: machine = 'ppc64' else: machine = 'ppc' return "%s-%s-%s" % (osname, release, machine) def get_python_version(): return _PY_VERSION_SHORT def _print_dict(title, data): for index, (key, value) in enumerate(sorted(data.items())): if index == 0: print('%s: ' % (title)) print('\t%s = "%s"' % (key, value)) def _main(): """Display all information sysconfig detains.""" print('Platform: "%s"' % get_platform()) print('Python version: "%s"' % get_python_version()) print('Current installation scheme: "%s"' % _get_default_scheme()) print() _print_dict('Paths', get_paths()) print() _print_dict('Variables', get_config_vars()) if __name__ == '__main__': _main() PK.e[l>distlib/_backport/misc.pyonu[ abc@sdZddlZddlZdddgZyddlmZWnek r`edZnXy eZWn*e k rddl m Z d ZnXy ej Z Wne k rd Z nXdS( s/Backports for individual classes and functions.iNtcache_from_sourcetcallabletfsencode(RcCs|r dpd}||S(Ntcto((tpy_filetdebugtext((sF/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.pyRs(tCallablecCs t|tS(N(t isinstanceR(tobj((sF/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.pyRscCsRt|tr|St|tr5|jtjStdt|jdS(Nsexpect bytes or str, not %s( R tbyteststrtencodetsystgetfilesystemencodingt TypeErrorttypet__name__(tfilename((sF/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.pyR"s (t__doc__tosRt__all__timpRt ImportErrort __debug__Rt NameErrort collectionsRRtAttributeError(((sF/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.pyts         PK.e[t(distlib/_backport/__init__.pyonu[ abc@s dZdS(s Modules copied from Python 3 standard libraries, for internal use only. Individual classes and functions are found in d2._backport.misc. Intended usage is to always import things missing from 3.1 from that module: the built-in/stdlib objects will be used if found. 
N(t__doc__(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/__init__.pyttPK.e[਱BQBQdistlib/_backport/sysconfig.pycnu[ abc @s_dZddlZddlZddlZddlZddlmZmZyddlZWne k r{ddl ZnXdddddd d d d d dg Z dZ ej rejje ej Zne ejZejdkr(dedjkr(e ejjeeZnejdkrndedjkrne ejjeeeZnejdkrdedjkre ejjeeeZndZeZeadZejZejdZdZejj dZ!ejd Z"e!de!dZ#ejj$ej%Z&ejj$ej'Z(da*dZ+dZ,dZ-d Z.d!Z/d"Z0d#Z1dd$Z2d%Z3d&Z4d'Z5dd(Z6d)Z7d*Z8d+Z9e0de:d,Z;e0de:d-Z<d.Z=d/Z>d0Z?d1Z@d2ZAd3ZBeCd4kr[eBndS(5s-Access to Python's configuration information.iN(tpardirtrealpathtget_config_h_filenametget_config_vartget_config_varstget_makefile_filenametget_pathtget_path_namest get_pathst get_platformtget_python_versiontget_scheme_namestparse_config_hcCs'yt|SWntk r"|SXdS(N(RtOSError(tpath((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt_safe_realpath"s tnttpcbuildis\pc\vis\pcbuild\amd64icCs=x6dD].}tjjtjjtd|rtSqWtS(Ns Setup.dists Setup.localtModules(s Setup.dists Setup.local(tosRtisfiletjoint _PROJECT_BASEtTruetFalse(tfn((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pytis_python_build:s $cCstsddlm}tjddd}||}|jd}|sYtd|j}tj |WdQXt rx7dD],}tj |d d tj |d d qWnt andS(Ni(tfindert.iis sysconfig.cfgssysconfig.cfg existst posix_prefixt posix_hometincludes{srcdir}/Includet platincludes{projectbase}/.(RR( t _cfg_readt resourcesRt__name__trsplittfindtAssertionErrort as_streamt_SCHEMEStreadfpt _PYTHON_BUILDtsetR(Rtbackport_packaget_findert_cfgfiletstscheme((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt_ensure_cfg_readDs  s \{([^{]*?)\}cs-t|jdr(|jd}n t}|j}xb|D]Z}|dkr\qDnx?|D]7\}}|j||rqcn|j|||qcWqDW|jdxw|jD]i}t|j|fd}x<|j|D]+\}}|j||t j ||qWqWdS(Ntglobalscs0|jd}|kr#|S|jdS(Nii(tgroup(tmatchobjtname(t variables(sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt _replaceros ( R1t has_sectiontitemsttupletsectionst has_optionR+tremove_sectiontdictt 
_VAR_REPLtsub(tconfigR2R;tsectiontoptiontvalueR7((R6sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt_expand_globalsYs$     iiicsfd}tj||S(sIn the string `path`, replace tokens like {some.thing} with the corresponding value from the map `local_vars`. If there is no corresponding value, leave the token unchanged. csJ|jd}|kr#|S|tjkr=tj|S|jdS(Nii(R3Rtenviron(R4R5(t local_vars(sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyR7s   (R?R@(RRGR7((RGsK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt _subst_varsscCsI|j}x6|jD](\}}||kr7qn|||dttjf}nd}tjjt d|dS(s Return the path of the Makefile.tMakefiletabiflagss config-%s%sRAtstdlib( R*RRRRthasattrRbt_PY_VERSION_SHORTRR(tconfig_dir_name((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyRMs cCst}yt||WnLtk rh}d|}t|drY|d|j}nt|nXt}y&t|}t||WdQXWnLtk r}d|}t|dr|d|j}nt|nXtr|d|dR(R9(R0RStexpand((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyRs cCst||||S(s[Return a path corresponding to the scheme. ``scheme`` is the install scheme name. (R(R5R0RSR((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyRscGstd"krRiattdkrd4}qI|d?krd5}qIt d6|fqL|d.krtj!d@krId0}qIqL|dAkrLtj!dBkr@d3}qId/}qLqOnd:|||fS(CsReturn a string that identifies the current platform. This is used mainly to distinguish platform-specific build directories and platform-specific built distributions. Typically includes the OS name and version and the architecture (as supplied by 'os.uname()'), although the exact information included depends on the OS; eg. for IRIX the architecture isn't particularly important (IRIX only runs on SGI hardware), but for Linux the kernel version isn't particularly important. Examples of returned values: linux-i586 linux-alpha (?) 
solaris-2.6-sun4u irix-5.3 irix64-6.2 Windows will return one of: win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) win-ia64 (64bit Windows on Itanium) win32 (all others - specifically, sys.platform is returned) For other non-POSIX platforms, currently just returns 'sys.platform'. Rs bit (it)tamd64s win-amd64titaniumswin-ia64RORt/RmRt_t-itlinuxs%s-%stsunosit5tsolariss%d.%siiitirixtaixs%s-%s.%sitcygwins[\d.]+R^tMACOSX_DEPLOYMENT_TARGETs0/System/Library/CoreServices/SystemVersion.plists=ProductUserVisibleVersion\s*(.*?)NRitmacosxs10.4.s-archRotfats -arch\s+(\S+)ti386tppctx86_64tinteltfat3tppc64tfat64t universals%Don't know machine value for archs=%ri tPowerPCtPower_Macintoshs%s-%s-%s(RR(RR(RRR(RR(RRRRI(RRI("RR5RbRR%RcRtlowerRRR{R|RsRtRzR3RRaRRvRRtreadtcloseRPRRRytfindallR:RR+R}tmaxsize(RtitjtlooktosnamethosttreleaseRtmachinetrel_reRtcfgvarstmacvert macreleaseRtcflagstarchs((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyR [s    (     + !               cCstS(N(R(((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyR scCsZxStt|jD]9\}\}}|dkrCd|GHnd||fGHqWdS(Nis%s: s %s = "%s"(t enumerateRR9(ttitletdatatindexRMRD((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt _print_dicts+  cCsRdtGHdtGHdtGHdGHtdtdGHtdtdS( s*Display all information sysconfig detains.sPlatform: "%s"sPython version: "%s"s!Current installation scheme: "%s"tPathst VariablesN(((R R RWRRR(((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyt_mains   t__main__(Dt__doc__RuRRsRbtos.pathRRt configparsert ImportErrort ConfigParsert__all__RRRRRRR5RRRR*RR!R1tRawConfigParserR(RtR?RERRRRRRRRRRRRPRt _USER_BASERHRNRURVRWRhRRRRR RR RRRRRRR R RRR#(((sK/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyts        %%!%!     
#      v         PK.e[;VgVgdistlib/_backport/shutil.pycnu[ abc@s"dZddlZddlZddlZddlmZddlZddlZddlZddl m Z yddl Z e Z Wnek reZ nXyddlmZWnek rdZnXyddlmZWnek rdZnXdd d d d d dddddddddddddddgZdefdYZdefdYZdefdYZd efd!YZd"efd#YZyeWnek rdZnXdWd&Zd'Z d(Z!d)Z"d*Z#d+Z$d,Z%d-Z&ede%ed.Z'edd/Z(d0Z)d1Z*d2Z+d3Z,d4Z-d5d6d6dddd7Z.eed8Z/d6d6dd9Z0ie.dXgd;fd<6e.dYgd>fd?6e.dZgd@fdA6e0gdBfdC6Z1e re.d[gd>fe1d?fe=d?dddVZ?dS(\sUtility functions for copying and archiving files and directory trees. XXX The functions here don't copy the resource fork or other metadata on Mac. iN(tabspathi(ttarfile(tgetpwnam(tgetgrnamt copyfileobjtcopyfiletcopymodetcopystattcopytcopy2tcopytreetmovetrmtreetErrortSpecialFileErrort ExecErrort make_archivetget_archive_formatstregister_archive_formattunregister_archive_formattget_unpack_formatstregister_unpack_formattunregister_unpack_formattunpack_archivetignore_patternscBseZRS((t__name__t __module__(((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR ,scBseZdZRS(s|Raised when trying to do a kind of operation (e.g. copying) which is not supported on a special file (e.g. 
a named pipe)(RRt__doc__(((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR/scBseZdZRS(s+Raised when a command could not be executed(RRR(((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR3st ReadErrorcBseZdZRS(s%Raised when an archive cannot be read(RRR(((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR6st RegistryErrorcBseZdZRS(sVRaised when a registry operation with the archiving and unpacking registries fails(RRR(((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR9siicCs1x*|j|}|sPn|j|qWdS(s=copy data from file-like object fsrc to file-like object fdstN(treadtwrite(tfsrctfdsttlengthtbuf((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRCs cCs{ttjdrAytjj||SWqAtk r=tSXntjjtjj|tjjtjj|kS(Ntsamefile(thasattrtostpathR$tOSErrortFalsetnormcaseR(tsrctdst((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyt _samefileKs c Cst||r(td||fnx`||gD]R}ytj|}Wntk raq5Xtj|jr5td|q5q5Wt|d,}t|d}t ||WdQXWdQXdS(sCopy data from src to dsts`%s` and `%s` are the same files`%s` is a named pipetrbtwbN( R-R R&tstatR(tS_ISFIFOtst_modeRtopenR(R+R,tfntstR R!((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRWs cCsGttdrCtj|}tj|j}tj||ndS(sCopy mode bits from src to dsttchmodN(R%R&R0tS_IMODER2R6(R+R,R5tmode((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRkscCstj|}tj|j}ttdrOtj||j|jfnttdrqtj||nttdrt|drytj ||j Wqt k r}tt d s|j t j krqqXndS(sCCopy all stat info (mode bits, atime, mtime, flags) from src to dsttutimeR6tchflagstst_flagst EOPNOTSUPPN(R&R0R7R2R%R9tst_atimetst_mtimeR6R:R;R(terrnoR<(R+R,R5R8twhy((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRrscCsTtjj|r6tjj|tjj|}nt||t||dS(sVCopy data and mode bits ("cp src dst"). The destination may be a directory. 
N(R&R'tisdirtjointbasenameRR(R+R,((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRs$ cCsTtjj|r6tjj|tjj|}nt||t||dS(s]Copy data and all stat info ("cp -p src dst"). The destination may be a directory. N(R&R'RARBRCRR(R+R,((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR s$ csfd}|S(sFunction that can be used as copytree() ignore parameter. Patterns is a sequence of glob-style patterns that are used to exclude filescs:g}x'D]}|jtj||q Wt|S(N(textendtfnmatchtfiltertset(R'tnamest ignored_namestpattern(tpatterns(sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyt_ignore_patternss ((RKRL((RKsH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRscCs tj|}|dk r-|||}n t}tj|g}xG|D]?} | |krhqPntjj|| } tjj|| } ytjj| rtj| } |rtj | | q6tjj |  r|rwPn|| | n8tjj | r)t | | |||n || | WqPt k r`} |j| jdqPtk r}|j| | t|fqPXqPWyt||WnMtk r}tdk rt|trq|j||t|fnX|r t |ndS(sRecursively copy a directory tree. The destination directory must not already exist. If exception(s) occur, an Error is raised with a list of reasons. If the optional symlinks flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. If the file pointed by the symlink doesn't exist, an exception will be added in the list of errors raised in an Error exception at the end of the copy process. You can set the optional ignore_dangling_symlinks flag to true if you want to silence this exception. Notice that this has no effect on platforms that don't support os.symlink. The optional ignore argument is a callable. 
If given, it is called with the `src` parameter, which is the directory being visited by copytree(), and `names` which is the list of `src` contents, as returned by os.listdir(): callable(src, names) -> ignored_names Since copytree() is called recursively, the callable will be called once for each directory that is copied. It returns a list of names relative to the `src` directory that should not be copied. The optional copy_function argument is a callable that will be used to copy each file. It will be called with the source path and the destination path as arguments. By default, copy2() is used, but any function that supports the same signature (like copy()) can be used. iN(R&tlistdirtNoneRGtmakedirsR'RBtislinktreadlinktsymlinktexistsRAR R RDtargstEnvironmentErrortappendtstrRR(t WindowsErrort isinstance(R+R,tsymlinkstignoret copy_functiontignore_dangling_symlinksRHRIterrorstnametsrcnametdstnametlinktoterrR@((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR sD$     $ cCs|rd}n|dkr*d}ny%tjj|rNtdnWn.tk r|tjj|tjdSXg}ytj|}Wn-tjk r|tj|tjnXx|D]}tjj ||}ytj |j }Wntjk rd}nXt j |r@t|||qytj|Wqtjk r|tj|tjqXqWytj|Wn-tjk r|tj|tjnXdS(sRecursively delete a directory tree. If ignore_errors is set, errors are ignored; otherwise, if onerror is set, it is called to handle the error with arguments (func, path, exc_info) where func is os.listdir, os.remove, or os.rmdir; path is the argument to that function that caused it to fail; and exc_info is a tuple returned by sys.exc_info(). If ignore_errors is false and onerror is None, an exception is raised. 
cWsdS(N((RT((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pytonerrorscWsdS(N((RT((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRdss%Cannot call rmtree on a symbolic linkNi(RNR&R'RPR(tsystexc_infoRMterrorRBtlstatR2R0tS_ISDIRR tremovetrmdir(R't ignore_errorsRdRHR_tfullnameR8((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR s>       !cCstjj|jtjjS(N(R&R'RCtrstriptsep(R'((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyt _basename'scCs|}tjj|r~t||r;tj||dStjj|t|}tjj|r~td|q~nytj||Wnt k rtjj|rt ||rtd||fnt ||dt t |qt||tj|nXdS(sRecursively move a file or directory to another location. This is similar to the Unix "mv" command. If the destination is a directory or a symlink to a directory, the source is moved inside the directory. The destination path must not already exist. If the destination already exists but is not a directory, it may be overwritten depending on os.rename() semantics. If the destination is on our current filesystem, then rename() is used. Otherwise, src is copied to the destination and then removed. A lot more could be done here... A look at a mv.c shows a lot of the issues this implementation glosses over. 
Ns$Destination path '%s' already existss.Cannot move a directory '%s' into itself '%s'.RZ(R&R'RAR-trenameRBRpRSR R(t _destinsrcR tTrueR R tunlink(R+R,treal_dst((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyR ,s$   cCsut|}t|}|jtjjs@|tjj7}n|jtjjsh|tjj7}n|j|S(N(RtendswithR&R'Rot startswith(R+R,((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyRrTs  cCs^tdks|dkrdSyt|}Wntk rEd}nX|dk rZ|dSdS(s"Returns a gid, given a group name.iN(RRNtKeyError(R_tresult((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyt_get_gid]s   cCs^tdks|dkrdSyt|}Wntk rEd}nX|dk rZ|dSdS(s"Returns an uid, given a user name.iN(RRNRx(R_Ry((sH/usr/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyt_get_uidis   tgzipics|idd6dd6}idd6} tr>d|d s                       Q1  ( =/    6     %   PK.e[#gdistlib/_backport/__init__.pynu["""Modules copied from Python 3 standard libraries, for internal use only. Individual classes and functions are found in d2._backport.misc. Intended usage is to always import things missing from 3.1 from that module: the built-in/stdlib objects will be used if found. 
""" PK.e[:}yydistlib/wheel.pycnu[ abc@sddlmZddlZddlZddlZddlZddlmZddl Z ddl Z ddl Z ddl Z ddl Z ddlZddlZddlZddlZddlZddlZddlmZmZddlmZmZmZmZmZddlmZddlm Z m!Z!dd l"m#Z#m$Z$m%Z%m&Z&m'Z'm(Z(m)Z)m*Z*m+Z+dd l,m-Z-m.Z.e j/e0Z1e2a3e4ed rd Z5n9ej6j7d rdZ5nej6dkrdZ5ndZ5ej8dZ9e9 rdej:d Z9nde9Z;e5e9Z<ej"j=j>ddj>ddZ?ej8dZ@e@oze@j7dre@j>ddZ@ndZAeAZ@[AejBdejCejDBZEejBdejCejDBZFejBdZGejBdZHd ZId!ZJe jKd"kr$d#ZLn d$ZLd%eMfd&YZNeNZOd'eMfd(YZPd)ZQeQZR[Qe2d*ZSdS(+i(tunicode_literalsN(tmessage_from_filei(t __version__tDistlibException(t sysconfigtZipFiletfsdecodet text_typetfilter(tInstalledDistribution(tMetadatatMETADATA_FILENAME( t FileOperatort convert_patht CSVReadert CSVWritertCachetcached_propertytget_cache_baset read_exportsttempdir(tNormalizedVersiontUnsupportedVersionErrorupypy_version_infouppujavaujyucliuipucpupy_version_nodotu%s%siupyu-u_u.uSOABIucpython-cCs|dtg}tjdr+|jdntjdrJ|jdntjddkro|jdnd j|S( NucpuPy_DEBUGudu WITH_PYMALLOCumuPy_UNICODE_SIZEiuuu(t VER_SUFFIXRtget_config_vartappendtjoin(tparts((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt _derive_abi;s uz (?P[^-]+) -(?P\d+[^-]*) (-(?P\d+[^-]*))? 
-(?P\w+\d+(\.\w+\d+)*) -(?P\w+) -(?P\w+(\.\w+)*) \.whl$ u7 (?P[^-]+) -(?P\d+[^-]*) (-(?P\d+[^-]*))?$ s \s*#![^\r\n]*s^(\s*#!("[^"]+"|\S+))\s+(.*)$s#!pythons #!pythonwu/cCs|S(N((to((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt]tcCs|jtjdS(Nu/(treplacetostsep(R((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR_RtMountercBs8eZdZdZdZddZdZRS(cCsi|_i|_dS(N(t impure_wheelstlibs(tself((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt__init__cs cCs!||j|<|jj|dS(N(R$R%tupdate(R&tpathnamet extensions((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytaddgs cCsI|jj|}x0|D](\}}||jkr|j|=qqWdS(N(R$tpopR%(R&R)R*tktv((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytremovekscCs"||jkr|}nd}|S(N(R%tNone(R&tfullnametpathtresult((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt find_moduleqs cCs|tjkrtj|}nx||jkrAtd|ntj||j|}||_|jdd}t|dkr|d|_ n|S(Nuunable to find extension for %su.ii( tsystmodulesR%t ImportErrortimpt load_dynamict __loader__trsplittlent __package__(R&R1R3R((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt load_modulexs N(t__name__t __module__R'R+R/R0R4R>(((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR#bs     tWheelcBseZdZdZdZdeedZedZ edZ edZ e dZ dZe d Zd Zdd Zd Zd ZdZdddZdZdZdZdZdZedZdZdZddZRS(u@ Class to build and install from Wheel files (PEP 427). iusha256cCs||_||_d|_tg|_dg|_dg|_tj|_ |dkr{d|_ d|_ |j |_nEtj|}|r|jd}|d|_ |djdd |_ |d |_|j |_ntjj|\}}tj|}|s!td |n|r?tjj||_ n||_|jd}|d|_ |d|_ |d |_|d jd |_|djd |_|djd |_dS(uB Initialise an instance using a (valid) filename. 
uunoneuanyudummyu0.1unmuvnu_u-ubnuInvalid name or filename: %rupyu.ubiuarN(tsignt should_verifytbuildvertPYVERtpyvertabitarchR!tgetcwdtdirnameR0tnametversiontfilenamet _filenametNAME_VERSION_REtmatcht groupdictR R2tsplitt FILENAME_RERtabspath(R&RMRBtverifytmtinfoRJ((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR'sB                cCs|jrd|j}nd}dj|j}dj|j}dj|j}|jjdd}d|j|||||fS(uJ Build and return a filename from the various components. u-uu.u_u%s-%s%s-%s-%s-%s.whl(RDRRFRGRHRLR RK(R&RDRFRGRHRL((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyRMs cCs+tjj|j|j}tjj|S(N(R!R2RRJRMtisfile(R&R2((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytexistssccsNxG|jD]<}x3|jD](}x|jD]}|||fVq*WqWq WdS(N(RFRGRH(R&RFRGRH((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyttagssc Cs8tjj|j|j}d|j|jf}d|}tjd}t |d}|j |}|dj dd}t g|D]}t |^q} | d krd} nt} yItj|| } |j| "} || } td | }WdQXWn!tk r-td | nXWdQX|S( Nu%s-%su %s.dist-infouutf-8uru Wheel-Versionu.iuMETADATAtfileobju$Invalid wheel, because %s is missing(ii(R!R2RRJRMRKRLtcodecst getreaderRtget_wheel_metadataRRttupletintR t posixpathtopenR tKeyErrort ValueError(R&R)tname_vertinfo_dirtwrappertzftwheel_metadatatwvtit file_versiontfntmetadata_filenametbftwfR3((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytmetadatas( %    cCsud|j|jf}d|}tj|d}|j|(}tjd|}t|}WdQXt|S(Nu%s-%su %s.dist-infouWHEELuutf-8( RKRLRaRRbR\R]Rtdict(R&RhReRfRnRoRptmessage((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR^s cCsFtjj|j|j}t|d}|j|}WdQX|S(Nur(R!R2RRJRMRR^(R&R)RhR3((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyRWsc Cstj|}|r|j}|| ||}}d|jkrQt}nt}tj|}|rd|jd}nd}||}||}ns|jd}|jd} |dks|| krd} n&|||d!d krd } nd} t| |}|S( Ntpythonwt iRs s iis ( t SHEBANG_RERPtendtlowertSHEBANG_PYTHONWtSHEBANG_PYTHONtSHEBANG_DETAIL_REtgroupstfind( 
R&tdataRVRwtshebangtdata_after_shebangtshebang_pythontargstcrtlftterm((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytprocess_shebangs,      cCs|dkr|j}nytt|}Wn!tk rNtd|nX||j}tj|j dj d}||fS(NuUnsupported hash algorithm: %rt=uascii( R0t hash_kindtgetattrthashlibtAttributeErrorRtdigesttbase64turlsafe_b64encodetrstriptdecode(R&R~RthasherR3((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytget_hashs   !cCs~t|}ttjj||}|j|ddf|jt|%}x|D]}|j|q]WWdQXdS(Nu( tlisttto_posixR!R2trelpathRtsortRtwriterow(R&trecordst record_pathtbasetptwritertrow((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt write_record's   cCsg}|\}}tt|j}xs|D]k\}} t| d} | j} WdQXd|j| } tjj| } |j || | fq+Wtjj |d} |j || |t tjj |d}|j || fdS(Nurbu%s=%suRECORD( RRRRbtreadRR!R2tgetsizeRRRR(R&RWtlibdirt archive_pathsRtdistinfoRfRtapRtfR~Rtsize((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt write_records0s c Cs\t|dtjA}x7|D]/\}}tjd|||j||qWWdQXdS(NuwuWrote %s to %s in wheel(Rtzipfilet ZIP_DEFLATEDtloggertdebugtwrite(R&R)RRhRR((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt build_zip@sc! s|dkri}nttfdd$d}|dkrgd}tg}tg}tg}n!d}tg}dg}dg}|jd ||_|jd ||_ |jd ||_ |} d |j |j f} d | } d| } g} xKd%D]C}|kr qn|}t jj|rx t j|D]\}}}x|D]}tt jj||}t jj||}tt jj| ||}| j||f|dkrb|jd rbt|d}|j}WdQX|j|}t|d}|j|WdQXqbqbWqLWqqW| }d}xt j|D]\}}}||krxUt|D]G\}}t|}|jdrt jj||}||=PqqW|stdnxl|D]d}t|jd&rqnt jj||}tt jj||}| j||fqWqkWt j|}xf|D]^}|d'kr|tt jj||}tt jj| |}| j||fq|q|Wd|p|jdtd |g}x4|j D])\}}}|jd!|||fq Wt jj|d}t|d"}|jd#j|WdQXtt jj| d}| j||f|j!|| f| | t jj|j"|j#} |j$| | | S((u Build a wheel from files in specified paths, and use any specified tags when determining the name of the wheel. 
cs |kS(N((R(tpaths(s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyRNRupurelibuplatlibiufalseutrueunoneuanyupyveruabiuarchu%s-%su%s.datau %s.dist-infoudatauheadersuscriptsu.exeurbNuwbu .dist-infou(.dist-info directory expected, not foundu.pycu.pyouRECORDu INSTALLERuSHAREDuWHEELuWheel-Version: %d.%duGenerator: distlib %suRoot-Is-Purelib: %su Tag: %s-%s-%suwu (upurelibuplatlib(udatauheadersuscripts(u.pycu.pyo(uRECORDu INSTALLERuSHAREDuWHEEL(%R0RRtIMPVERtABItARCHREtgetRFRGRHRKRLR!R2tisdirtwalkRRRRRtendswithRbRRRt enumeratetAssertionErrortlistdirt wheel_versionRRZRRJRMR(!R&RRZRtlibkeytis_puret default_pyvert default_abit default_archRRetdata_dirRfRtkeyR2troottdirstfilesRmRtrpRRR~RRktdnRiRFRGRHR)((Rs=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytbuildFs  "              %      cCKs |j}|jd}|jdt}tjj|j|j}d|j|j f}d|} d|} t j| t } t j| d} t j| d} t j d}t|d }|j| }||}t|}Wd QX|d jd d }tg|D]}t|^q}||jkrY|rY||j|n|ddkrv|d}n |d}i}|j| D}td|,}x"|D]}|d}||||jd.}6|6r|6jd/}6nWd QXWnt1k rt+j2d0nX|6r|6jd1i}>|6jd2i}?|>s|?r|jdd}@tjj?|@st@d3n|@|_xF|>jAD]8\}:}<d4|:|<f}A|j4|A}4|j5|4q(W|?ritd(6}BxL|?jAD];\}:}<d4|:|<f}A|j4|A|B}4|j5|4qWqqntjj|| }tB|}5tC|}|d=|d=||d5<|5jD||}|r9 |!j/|n|5jE|!|d6||5SWn+t1k r t+jFd7|jGnXWd tHjI|"XWd QXd S(9u Install a wheel to the specified paths. If kwarg ``warner`` is specified, it should be a callable, which will be called with two tuples indicating the wheel version of this software and the wheel version in the file, if there is a discrepancy in the versions. This can be used to issue any warnings to raise any exceptions. If kwarg ``lib_only`` is True, only the purelib/platlib files are installed, and the headers, scripts, data and dist-info metadata are not written. The return value is a :class:`InstalledDistribution` instance unless ``options.lib_only`` is True, in which case the return value is ``None``. 
uwarnerulib_onlyu%s-%su%s.datau %s.dist-infouWHEELuRECORDuutf-8urNu Wheel-Versionu.iuRoot-Is-Purelibutrueupurelibuplatlibtstreamiuuscriptstdry_runu /RECORD.jwsiusize mismatch for %su=udigest mismatch for %sulib_only: skipping %su.exeu/urbudigest mismatch on write for %su.pyuByte-compilation failedtexc_infoulib_only: returning Noneu1.0uentry_points.txtuconsoleuguiu %s_scriptsuwrap_%su%s:%su %suAUnable to read legacy script metadata, so cannot generate scriptsu extensionsupython.commandsu8Unable to read JSON metadata, so cannot generate scriptsu wrap_consoleuwrap_guiuValid script path not specifiedu%s = %sulibuprefixuinstallation failed.(uconsoleugui(JRRtFalseR!R2RRJRMRKRLRaR R\R]RRbRRRR_R`RRR tTruetrecordR5tdont_write_bytecodettempfiletmkdtempt source_dirR0t target_dirtinfolistt isinstanceRRRtstrt file_sizeRRRt startswithRRR t copy_streamRt byte_compilet Exceptiontwarningtbasenametmaketset_executable_modetextendRWRtvaluestprefixtsuffixtflagstjsontloadRRdtitemsR Rrtwrite_shared_locationstwrite_installed_filest exceptiontrollbacktshutiltrmtree(CR&RtmakertkwargsRtwarnertlib_onlyR)ReRRft metadata_nametwheel_metadata_namet record_nameRgRhtbwfRpRsRjRkRlRRRotreaderRRtdata_pfxtinfo_pfxt script_pfxtfileoptbctoutfilestworkdirtzinfotarcnamet u_arcnametkindtvalueR~t_Rt is_scripttwhereRtoutfilet newdigesttpycRmtworknameRt filenamestdisttcommandsteptepdataRR-tdR.tstconsole_scriptst gui_scriptst script_dirtscripttoptions((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytinstallsD    %            #   "                            cCsGtdkrCtjjttdtjd }t |antS(Nu dylib-cachei( tcacheR0R!R2RRRR5RLR(R&R((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt_get_dylib_caches  c Cstjj|j|j}d|j|jf}d|}tj|d}tj d}g}t |dw}y\|j |G}||} t j | } |j} | j|} tjj| j| } tjj| stj| nx| jD]\}}tjj| t|}tjj|sHt}nQtj|j}tjj|}|j|}tj|j}||k}|r|j|| n|j||fqWWdQXWntk rnXWdQX|S(Nu%s-%su %s.dist-infou EXTENSIONSuutf-8ur( R!R2RRJRMRKRLRaR\R]RRbRRRt prefix_to_dirRRtmakedirsRR 
RYRtstattst_mtimetdatetimet fromtimestamptgetinfot date_timetextractRRc(R&R)ReRfRRgR3RhRoRpR*RRt cache_baseRKRtdestRt file_timeRWt wheel_time((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt_get_extensionss>     !  cCs t|S(uM Determine if a wheel is compatible with the running system. (t is_compatible(R&((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR%scCstS(uP Determine if a wheel is asserted as mountable by its metadata. (R(R&((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyt is_mountablescCs tjjtjj|j|j}|jsLd|}t|n|jsqd|}t|n|t jkrt j d|ns|rt jj |nt jj d||j}|rtt jkrt jj tntj||ndS(Nu)Wheel %s not compatible with this Python.u$Wheel %s is marked as not mountable.u%s already in pathi(R!R2RTRRJRMR%RR&R5RRRtinsertR$t_hookt meta_pathR+(R&RR)tmsgR*((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytmounts"'     cCstjjtjj|j|j}|tjkrItjd|n]tjj ||t j krxt j |nt j st tj krtj j t qndS(Nu%s not in path( R!R2RTRRJRMR5RRR/R(R$R)(R&R)((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytunmounts' cCstjj|j|j}d|j|jf}d|}d|}tj|t}tj|d}tj|d}t j d}t |d } | j |} || } t | } WdQX| djd d } tg| D]}t|^q}i}| j |D}td |,}x"|D]}|d }|||Fsu0Cannot update non-compliant (PEP-440) version %rR2tlegacyuVersion updated from %r to %r(R0RR}RRR`RRRRR RLRR R( RLR2tupdatedR.RkRRtmdR0((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytupdate_version;s(   0 !     
u%s-%su %s.dist-infouRECORDuruutf-8u..uinvalid entry in wheel: %rNRu.whlRu wheel-update-tdiruNot a directory: %r(R!R2RRJRMRKRLRaRRRRRRRRR R0RtmkstemptcloseRRRRRRtcopyfile(R&tmodifiertdest_dirRR.R3R)ReRfRRRhR-RRRR2toriginal_versionRtmodifiedtcurrent_versiontfdtnewpathRRRW((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR( sX           (iiN(R?R@t__doc__RRR0RR'tpropertyRMRYRZRRqR^RWRRRRRRRRR$R%R&R+R,RUR((((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyRAs2)    h "    6cCstg}td}xGttjddddD](}|jdj|t|gq1Wg}xLtjD]>\}}}|j drp|j|j dddqpqpW|j t dkr|j dt n|jdg}tg}tjd kr=tjd t}|r=|j\} }}} t|}| g} | dkrg| jd n| dkr| jdn| dkr| jdn| dkr| jdn| dkr| jdnx`|dkr6x@| D]8} d| ||| f} | tkr|j| qqW|d8}qWq=nxH|D]@}x7|D]/} |jdjt|df|| fqQWqDWxwt|D]i\}}|jdjt|fddf|dkr|jdjt|dfddfqqWxwt|D]i\}}|jdjd|fddf|dkr|jdjd|dfddfqqWt|S(uG Return (pyver, abi, arch) tuples compatible with this Python. iiiuu.abiu.iunoneudarwinu(\w+)_(\d+)_(\d+)_(\w+)$ui386uppcufatux86_64ufat3uppc64ufat64uintelu universalu %s_%s_%s_%suanyupy(ui386uppc(ui386uppcux86_64(uppc64ux86_64(ui386ux86_64(ui386ux86_64uinteluppcuppc64(RtrangeR5t version_infoRRRR8t get_suffixesRRRRRR'RtplatformtreRPR|R`t IMP_PREFIXRtset(tversionstmajortminortabisRRR3tarchesRVRKRHtmatchesRPRRGRkRL((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pytcompatible_tagss`  $&$               1% 0% 0cCst|tst|}nt}|dkr9t}nxN|D]F\}}}||jkr@||jkr@||jkr@t}Pq@q@W|S(N( RRARR0tCOMPATIBLE_TAGSRFRGRHR(twheelRZR3tverRGRH((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyR%s  -(Tt __future__RRR\Rtdistutils.utilt distutilstemailRRR8RtloggingR!RaRERR5RRRRRtcompatRRRRRtdatabaseR RqR R tutilR R RRRRRRRRLRRt getLoggerR?RR0RthasattrRFRDRRRRBRERt get_platformR RRRtcompilet IGNORECASEtVERBOSERSRORvR{RzRyR"RtobjectR#R(RARNROR%(((s=/usr/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyts               (@     '   #  > PK.e[ ldistlib/metadata.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2012 The Python Software Foundation. 
# See LICENSE.txt and CONTRIBUTORS.txt. # """Implementation of the Metadata for Python packages PEPs. Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental). """ from __future__ import unicode_literals import codecs from email import message_from_file import json import logging import re from . import DistlibException, __version__ from .compat import StringIO, string_types, text_type from .markers import interpret from .util import extract_by_key, get_extras from .version import get_scheme, PEP440_VERSION_RE logger = logging.getLogger(__name__) class MetadataMissingError(DistlibException): """A required metadata is missing""" class MetadataConflictError(DistlibException): """Attempt to read or write metadata fields that are conflictual.""" class MetadataUnrecognizedVersionError(DistlibException): """Unknown metadata version number.""" class MetadataInvalidError(DistlibException): """A metadata value is invalid""" # public API of this module __all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION'] # Encoding used for the PKG-INFO files PKG_INFO_ENCODING = 'utf-8' # preferred version. 
Hopefully will be changed # to 1.2 once PEP 345 is supported everywhere PKG_INFO_PREFERRED_VERSION = '1.1' _LINE_PREFIX_1_2 = re.compile('\n \|') _LINE_PREFIX_PRE_1_2 = re.compile('\n ') _241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Summary', 'Description', 'Keywords', 'Home-page', 'Author', 'Author-email', 'License') _314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description', 'Keywords', 'Home-page', 'Author', 'Author-email', 'License', 'Classifier', 'Download-URL', 'Obsoletes', 'Provides', 'Requires') _314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', 'Download-URL') _345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description', 'Keywords', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License', 'Classifier', 'Download-URL', 'Obsoletes-Dist', 'Project-URL', 'Provides-Dist', 'Requires-Dist', 'Requires-Python', 'Requires-External') _345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', 'Obsoletes-Dist', 'Requires-External', 'Maintainer', 'Maintainer-email', 'Project-URL') _426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description', 'Keywords', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License', 'Classifier', 'Download-URL', 'Obsoletes-Dist', 'Project-URL', 'Provides-Dist', 'Requires-Dist', 'Requires-Python', 'Requires-External', 'Private-Version', 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension', 'Provides-Extra') _426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension') _ALL_FIELDS = set() _ALL_FIELDS.update(_241_FIELDS) _ALL_FIELDS.update(_314_FIELDS) _ALL_FIELDS.update(_345_FIELDS) _ALL_FIELDS.update(_426_FIELDS) EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''') def _version2fieldlist(version): if version == '1.0': return _241_FIELDS 
elif version == '1.1': return _314_FIELDS elif version == '1.2': return _345_FIELDS elif version == '2.0': return _426_FIELDS raise MetadataUnrecognizedVersionError(version) def _best_version(fields): """Detect the best version depending on the fields used.""" def _has_marker(keys, markers): for marker in markers: if marker in keys: return True return False keys = [] for key, value in fields.items(): if value in ([], 'UNKNOWN', None): continue keys.append(key) possible_versions = ['1.0', '1.1', '1.2', '2.0'] # first let's try to see if a field is not part of one of the version for key in keys: if key not in _241_FIELDS and '1.0' in possible_versions: possible_versions.remove('1.0') if key not in _314_FIELDS and '1.1' in possible_versions: possible_versions.remove('1.1') if key not in _345_FIELDS and '1.2' in possible_versions: possible_versions.remove('1.2') if key not in _426_FIELDS and '2.0' in possible_versions: possible_versions.remove('2.0') # possible_version contains qualified versions if len(possible_versions) == 1: return possible_versions[0] # found ! 
elif len(possible_versions) == 0: raise MetadataConflictError('Unknown metadata set') # let's see if one unique marker is found is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS) is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS) is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1: raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields') # we have the choice, 1.0, or 1.2, or 2.0 # - 1.0 has a broken Summary field but works with all tools # - 1.1 is to avoid # - 1.2 fixes Summary but has little adoption # - 2.0 adds more features and is very new if not is_1_1 and not is_1_2 and not is_2_0: # we couldn't find any specific marker if PKG_INFO_PREFERRED_VERSION in possible_versions: return PKG_INFO_PREFERRED_VERSION if is_1_1: return '1.1' if is_1_2: return '1.2' return '2.0' _ATTR2FIELD = { 'metadata_version': 'Metadata-Version', 'name': 'Name', 'version': 'Version', 'platform': 'Platform', 'supported_platform': 'Supported-Platform', 'summary': 'Summary', 'description': 'Description', 'keywords': 'Keywords', 'home_page': 'Home-page', 'author': 'Author', 'author_email': 'Author-email', 'maintainer': 'Maintainer', 'maintainer_email': 'Maintainer-email', 'license': 'License', 'classifier': 'Classifier', 'download_url': 'Download-URL', 'obsoletes_dist': 'Obsoletes-Dist', 'provides_dist': 'Provides-Dist', 'requires_dist': 'Requires-Dist', 'setup_requires_dist': 'Setup-Requires-Dist', 'requires_python': 'Requires-Python', 'requires_external': 'Requires-External', 'requires': 'Requires', 'provides': 'Provides', 'obsoletes': 'Obsoletes', 'project_url': 'Project-URL', 'private_version': 'Private-Version', 'obsoleted_by': 'Obsoleted-By', 'extension': 'Extension', 'provides_extra': 'Provides-Extra', } _PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist') _VERSIONS_FIELDS = ('Requires-Python',) _VERSION_FIELDS = ('Version',) _LISTFIELDS = 
('Platform', 'Classifier', 'Obsoletes', 'Requires', 'Provides', 'Obsoletes-Dist', 'Provides-Dist', 'Requires-Dist', 'Requires-External', 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist', 'Provides-Extra', 'Extension') _LISTTUPLEFIELDS = ('Project-URL',) _ELEMENTSFIELD = ('Keywords',) _UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description') _MISSING = object() _FILESAFE = re.compile('[^A-Za-z0-9.]+') def _get_name_and_version(name, version, for_filename=False): """Return the distribution name with version. If for_filename is true, return a filename-escaped form.""" if for_filename: # For both name and version any runs of non-alphanumeric or '.' # characters are replaced with a single '-'. Additionally any # spaces in the version string become '.' name = _FILESAFE.sub('-', name) version = _FILESAFE.sub('-', version.replace(' ', '.')) return '%s-%s' % (name, version) class LegacyMetadata(object): """The legacy metadata of a release. Supports versions 1.0, 1.1 and 1.2 (auto-detected). 
You can instantiate the class with one of these arguments (or none): - *path*, the path to a metadata file - *fileobj* give a file-like object with metadata as content - *mapping* is a dict-like object - *scheme* is a version scheme name """ # TODO document the mapping API and UNKNOWN default key def __init__(self, path=None, fileobj=None, mapping=None, scheme='default'): if [path, fileobj, mapping].count(None) < 2: raise TypeError('path, fileobj and mapping are exclusive') self._fields = {} self.requires_files = [] self._dependencies = None self.scheme = scheme if path is not None: self.read(path) elif fileobj is not None: self.read_file(fileobj) elif mapping is not None: self.update(mapping) self.set_metadata_version() def set_metadata_version(self): self._fields['Metadata-Version'] = _best_version(self._fields) def _write_field(self, fileobj, name, value): fileobj.write('%s: %s\n' % (name, value)) def __getitem__(self, name): return self.get(name) def __setitem__(self, name, value): return self.set(name, value) def __delitem__(self, name): field_name = self._convert_name(name) try: del self._fields[field_name] except KeyError: raise KeyError(name) def __contains__(self, name): return (name in self._fields or self._convert_name(name) in self._fields) def _convert_name(self, name): if name in _ALL_FIELDS: return name name = name.replace('-', '_').lower() return _ATTR2FIELD.get(name, name) def _default_value(self, name): if name in _LISTFIELDS or name in _ELEMENTSFIELD: return [] return 'UNKNOWN' def _remove_line_prefix(self, value): if self.metadata_version in ('1.0', '1.1'): return _LINE_PREFIX_PRE_1_2.sub('\n', value) else: return _LINE_PREFIX_1_2.sub('\n', value) def __getattr__(self, name): if name in _ATTR2FIELD: return self[name] raise AttributeError(name) # # Public API # # dependencies = property(_get_dependencies, _set_dependencies) def get_fullname(self, filesafe=False): """Return the distribution name with version. 
If filesafe is true, return a filename-escaped form.""" return _get_name_and_version(self['Name'], self['Version'], filesafe) def is_field(self, name): """return True if name is a valid metadata key""" name = self._convert_name(name) return name in _ALL_FIELDS def is_multi_field(self, name): name = self._convert_name(name) return name in _LISTFIELDS def read(self, filepath): """Read the metadata values from a file path.""" fp = codecs.open(filepath, 'r', encoding='utf-8') try: self.read_file(fp) finally: fp.close() def read_file(self, fileob): """Read the metadata values from a file object.""" msg = message_from_file(fileob) self._fields['Metadata-Version'] = msg['metadata-version'] # When reading, get all the fields we can for field in _ALL_FIELDS: if field not in msg: continue if field in _LISTFIELDS: # we can have multiple lines values = msg.get_all(field) if field in _LISTTUPLEFIELDS and values is not None: values = [tuple(value.split(',')) for value in values] self.set(field, values) else: # single line value = msg[field] if value is not None and value != 'UNKNOWN': self.set(field, value) self.set_metadata_version() def write(self, filepath, skip_unknown=False): """Write the metadata fields to filepath.""" fp = codecs.open(filepath, 'w', encoding='utf-8') try: self.write_file(fp, skip_unknown) finally: fp.close() def write_file(self, fileobject, skip_unknown=False): """Write the PKG-INFO format data to a file object.""" self.set_metadata_version() for field in _version2fieldlist(self['Metadata-Version']): values = self.get(field) if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']): continue if field in _ELEMENTSFIELD: self._write_field(fileobject, field, ','.join(values)) continue if field not in _LISTFIELDS: if field == 'Description': if self.metadata_version in ('1.0', '1.1'): values = values.replace('\n', '\n ') else: values = values.replace('\n', '\n |') values = [values] if field in _LISTTUPLEFIELDS: values = [','.join(value) for value in values] 
for value in values: self._write_field(fileobject, field, value) def update(self, other=None, **kwargs): """Set metadata values from the given iterable `other` and kwargs. Behavior is like `dict.update`: If `other` has a ``keys`` method, they are looped over and ``self[key]`` is assigned ``other[key]``. Else, ``other`` is an iterable of ``(key, value)`` iterables. Keys that don't match a metadata field or that have an empty value are dropped. """ def _set(key, value): if key in _ATTR2FIELD and value: self.set(self._convert_name(key), value) if not other: # other is None or empty container pass elif hasattr(other, 'keys'): for k in other.keys(): _set(k, other[k]) else: for k, v in other: _set(k, v) if kwargs: for k, v in kwargs.items(): _set(k, v) def set(self, name, value): """Control then set a metadata field.""" name = self._convert_name(name) if ((name in _ELEMENTSFIELD or name == 'Platform') and not isinstance(value, (list, tuple))): if isinstance(value, string_types): value = [v.strip() for v in value.split(',')] else: value = [] elif (name in _LISTFIELDS and not isinstance(value, (list, tuple))): if isinstance(value, string_types): value = [value] else: value = [] if logger.isEnabledFor(logging.WARNING): project_name = self['Name'] scheme = get_scheme(self.scheme) if name in _PREDICATE_FIELDS and value is not None: for v in value: # check that the values are valid if not scheme.is_valid_matcher(v.split(';')[0]): logger.warning( "'%s': '%s' is not valid (field '%s')", project_name, v, name) # FIXME this rejects UNKNOWN, is that right? 
elif name in _VERSIONS_FIELDS and value is not None: if not scheme.is_valid_constraint_list(value): logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name) elif name in _VERSION_FIELDS and value is not None: if not scheme.is_valid_version(value): logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name) if name in _UNICODEFIELDS: if name == 'Description': value = self._remove_line_prefix(value) self._fields[name] = value def get(self, name, default=_MISSING): """Get a metadata field.""" name = self._convert_name(name) if name not in self._fields: if default is _MISSING: default = self._default_value(name) return default if name in _UNICODEFIELDS: value = self._fields[name] return value elif name in _LISTFIELDS: value = self._fields[name] if value is None: return [] res = [] for val in value: if name not in _LISTTUPLEFIELDS: res.append(val) else: # That's for Project-URL res.append((val[0], val[1])) return res elif name in _ELEMENTSFIELD: value = self._fields[name] if isinstance(value, string_types): return value.split(',') return self._fields[name] def check(self, strict=False): """Check if the metadata is compliant. 
If strict is True then raise if no Name or Version are provided""" self.set_metadata_version() # XXX should check the versions (if the file was loaded) missing, warnings = [], [] for attr in ('Name', 'Version'): # required by PEP 345 if attr not in self: missing.append(attr) if strict and missing != []: msg = 'missing required metadata: %s' % ', '.join(missing) raise MetadataMissingError(msg) for attr in ('Home-page', 'Author'): if attr not in self: missing.append(attr) # checking metadata 1.2 (XXX needs to check 1.1, 1.0) if self['Metadata-Version'] != '1.2': return missing, warnings scheme = get_scheme(self.scheme) def are_valid_constraints(value): for v in value: if not scheme.is_valid_matcher(v.split(';')[0]): return False return True for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), (_VERSIONS_FIELDS, scheme.is_valid_constraint_list), (_VERSION_FIELDS, scheme.is_valid_version)): for field in fields: value = self.get(field, None) if value is not None and not controller(value): warnings.append("Wrong value for '%s': %s" % (field, value)) return missing, warnings def todict(self, skip_missing=False): """Return fields as a dict. Field names will be converted to use the underscore-lowercase style instead of hyphen-mixed case (i.e. home_page instead of Home-page). 
""" self.set_metadata_version() mapping_1_0 = ( ('metadata_version', 'Metadata-Version'), ('name', 'Name'), ('version', 'Version'), ('summary', 'Summary'), ('home_page', 'Home-page'), ('author', 'Author'), ('author_email', 'Author-email'), ('license', 'License'), ('description', 'Description'), ('keywords', 'Keywords'), ('platform', 'Platform'), ('classifiers', 'Classifier'), ('download_url', 'Download-URL'), ) data = {} for key, field_name in mapping_1_0: if not skip_missing or field_name in self._fields: data[key] = self[field_name] if self['Metadata-Version'] == '1.2': mapping_1_2 = ( ('requires_dist', 'Requires-Dist'), ('requires_python', 'Requires-Python'), ('requires_external', 'Requires-External'), ('provides_dist', 'Provides-Dist'), ('obsoletes_dist', 'Obsoletes-Dist'), ('project_url', 'Project-URL'), ('maintainer', 'Maintainer'), ('maintainer_email', 'Maintainer-email'), ) for key, field_name in mapping_1_2: if not skip_missing or field_name in self._fields: if key != 'project_url': data[key] = self[field_name] else: data[key] = [','.join(u) for u in self[field_name]] elif self['Metadata-Version'] == '1.1': mapping_1_1 = ( ('provides', 'Provides'), ('requires', 'Requires'), ('obsoletes', 'Obsoletes'), ) for key, field_name in mapping_1_1: if not skip_missing or field_name in self._fields: data[key] = self[field_name] return data def add_requirements(self, requirements): if self['Metadata-Version'] == '1.1': # we can't have 1.1 metadata *and* Setuptools requires for field in ('Obsoletes', 'Requires', 'Provides'): if field in self: del self[field] self['Requires-Dist'] += requirements # Mapping API # TODO could add iter* variants def keys(self): return list(_version2fieldlist(self['Metadata-Version'])) def __iter__(self): for key in self.keys(): yield key def values(self): return [self[key] for key in self.keys()] def items(self): return [(key, self[key]) for key in self.keys()] def __repr__(self): return '<%s %s %s>' % (self.__class__.__name__, self.name, 
self.version) METADATA_FILENAME = 'pydist.json' WHEEL_METADATA_FILENAME = 'metadata.json' class Metadata(object): """ The metadata of a release. This implementation uses 2.0 (JSON) metadata where possible. If not possible, it wraps a LegacyMetadata instance which handles the key-value metadata format. """ METADATA_VERSION_MATCHER = re.compile('^\d+(\.\d+)*$') NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I) VERSION_MATCHER = PEP440_VERSION_RE SUMMARY_MATCHER = re.compile('.{1,2047}') METADATA_VERSION = '2.0' GENERATOR = 'distlib (%s)' % __version__ MANDATORY_KEYS = { 'name': (), 'version': (), 'summary': ('legacy',), } INDEX_KEYS = ('name version license summary description author ' 'author_email keywords platform home_page classifiers ' 'download_url') DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires ' 'dev_requires provides meta_requires obsoleted_by ' 'supports_environments') SYNTAX_VALIDATORS = { 'metadata_version': (METADATA_VERSION_MATCHER, ()), 'name': (NAME_MATCHER, ('legacy',)), 'version': (VERSION_MATCHER, ('legacy',)), 'summary': (SUMMARY_MATCHER, ('legacy',)), } __slots__ = ('_legacy', '_data', 'scheme') def __init__(self, path=None, fileobj=None, mapping=None, scheme='default'): if [path, fileobj, mapping].count(None) < 2: raise TypeError('path, fileobj and mapping are exclusive') self._legacy = None self._data = None self.scheme = scheme #import pdb; pdb.set_trace() if mapping is not None: try: self._validate_mapping(mapping, scheme) self._data = mapping except MetadataUnrecognizedVersionError: self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme) self.validate() else: data = None if path: with open(path, 'rb') as f: data = f.read() elif fileobj: data = fileobj.read() if data is None: # Initialised with no args - to be added self._data = { 'metadata_version': self.METADATA_VERSION, 'generator': self.GENERATOR, } else: if not isinstance(data, text_type): data = data.decode('utf-8') try: self._data = 
json.loads(data) self._validate_mapping(self._data, scheme) except ValueError: # Note: MetadataUnrecognizedVersionError does not # inherit from ValueError (it's a DistlibException, # which should not inherit from ValueError). # The ValueError comes from the json.load - if that # succeeds and we get a validation error, we want # that to propagate self._legacy = LegacyMetadata(fileobj=StringIO(data), scheme=scheme) self.validate() common_keys = set(('name', 'version', 'license', 'keywords', 'summary')) none_list = (None, list) none_dict = (None, dict) mapped_keys = { 'run_requires': ('Requires-Dist', list), 'build_requires': ('Setup-Requires-Dist', list), 'dev_requires': none_list, 'test_requires': none_list, 'meta_requires': none_list, 'extras': ('Provides-Extra', list), 'modules': none_list, 'namespaces': none_list, 'exports': none_dict, 'commands': none_dict, 'classifiers': ('Classifier', list), 'source_url': ('Download-URL', None), 'metadata_version': ('Metadata-Version', None), } del none_list, none_dict def __getattribute__(self, key): common = object.__getattribute__(self, 'common_keys') mapped = object.__getattribute__(self, 'mapped_keys') if key in mapped: lk, maker = mapped[key] if self._legacy: if lk is None: result = None if maker is None else maker() else: result = self._legacy.get(lk) else: value = None if maker is None else maker() if key not in ('commands', 'exports', 'modules', 'namespaces', 'classifiers'): result = self._data.get(key, value) else: # special cases for PEP 459 sentinel = object() result = sentinel d = self._data.get('extensions') if d: if key == 'commands': result = d.get('python.commands', value) elif key == 'classifiers': d = d.get('python.details') if d: result = d.get(key, value) else: d = d.get('python.exports') if not d: d = self._data.get('python.exports') if d: result = d.get(key, value) if result is sentinel: result = value elif key not in common: result = object.__getattribute__(self, key) elif self._legacy: result = 
self._legacy.get(key) else: result = self._data.get(key) return result def _validate_value(self, key, value, scheme=None): if key in self.SYNTAX_VALIDATORS: pattern, exclusions = self.SYNTAX_VALIDATORS[key] if (scheme or self.scheme) not in exclusions: m = pattern.match(value) if not m: raise MetadataInvalidError("'%s' is an invalid value for " "the '%s' property" % (value, key)) def __setattr__(self, key, value): self._validate_value(key, value) common = object.__getattribute__(self, 'common_keys') mapped = object.__getattribute__(self, 'mapped_keys') if key in mapped: lk, _ = mapped[key] if self._legacy: if lk is None: raise NotImplementedError self._legacy[lk] = value elif key not in ('commands', 'exports', 'modules', 'namespaces', 'classifiers'): self._data[key] = value else: # special cases for PEP 459 d = self._data.setdefault('extensions', {}) if key == 'commands': d['python.commands'] = value elif key == 'classifiers': d = d.setdefault('python.details', {}) d[key] = value else: d = d.setdefault('python.exports', {}) d[key] = value elif key not in common: object.__setattr__(self, key, value) else: if key == 'keywords': if isinstance(value, string_types): value = value.strip() if value: value = value.split() else: value = [] if self._legacy: self._legacy[key] = value else: self._data[key] = value @property def name_and_version(self): return _get_name_and_version(self.name, self.version, True) @property def provides(self): if self._legacy: result = self._legacy['Provides-Dist'] else: result = self._data.setdefault('provides', []) s = '%s (%s)' % (self.name, self.version) if s not in result: result.append(s) return result @provides.setter def provides(self, value): if self._legacy: self._legacy['Provides-Dist'] = value else: self._data['provides'] = value def get_requirements(self, reqts, extras=None, env=None): """ Base method to get dependencies, given a set of extras to satisfy and an optional environment context. 
:param reqts: A list of sometimes-wanted dependencies, perhaps dependent on extras and environment. :param extras: A list of optional components being requested. :param env: An optional environment for marker evaluation. """ if self._legacy: result = reqts else: result = [] extras = get_extras(extras or [], self.extras) for d in reqts: if 'extra' not in d and 'environment' not in d: # unconditional include = True else: if 'extra' not in d: # Not extra-dependent - only environment-dependent include = True else: include = d.get('extra') in extras if include: # Not excluded because of extras, check environment marker = d.get('environment') if marker: include = interpret(marker, env) if include: result.extend(d['requires']) for key in ('build', 'dev', 'test'): e = ':%s:' % key if e in extras: extras.remove(e) # A recursive call, but it should terminate since 'test' # has been removed from the extras reqts = self._data.get('%s_requires' % key, []) result.extend(self.get_requirements(reqts, extras=extras, env=env)) return result @property def dictionary(self): if self._legacy: return self._from_legacy() return self._data @property def dependencies(self): if self._legacy: raise NotImplementedError else: return extract_by_key(self._data, self.DEPENDENCY_KEYS) @dependencies.setter def dependencies(self, value): if self._legacy: raise NotImplementedError else: self._data.update(value) def _validate_mapping(self, mapping, scheme): if mapping.get('metadata_version') != self.METADATA_VERSION: raise MetadataUnrecognizedVersionError() missing = [] for key, exclusions in self.MANDATORY_KEYS.items(): if key not in mapping: if scheme not in exclusions: missing.append(key) if missing: msg = 'Missing metadata items: %s' % ', '.join(missing) raise MetadataMissingError(msg) for k, v in mapping.items(): self._validate_value(k, v, scheme) def validate(self): if self._legacy: missing, warnings = self._legacy.check(True) if missing or warnings: logger.warning('Metadata: missing: %s, 
warnings: %s', missing, warnings) else: self._validate_mapping(self._data, self.scheme) def todict(self): if self._legacy: return self._legacy.todict(True) else: result = extract_by_key(self._data, self.INDEX_KEYS) return result def _from_legacy(self): assert self._legacy and not self._data result = { 'metadata_version': self.METADATA_VERSION, 'generator': self.GENERATOR, } lmd = self._legacy.todict(True) # skip missing ones for k in ('name', 'version', 'license', 'summary', 'description', 'classifier'): if k in lmd: if k == 'classifier': nk = 'classifiers' else: nk = k result[nk] = lmd[k] kw = lmd.get('Keywords', []) if kw == ['']: kw = [] result['keywords'] = kw keys = (('requires_dist', 'run_requires'), ('setup_requires_dist', 'build_requires')) for ok, nk in keys: if ok in lmd and lmd[ok]: result[nk] = [{'requires': lmd[ok]}] result['provides'] = self.provides author = {} maintainer = {} return result LEGACY_MAPPING = { 'name': 'Name', 'version': 'Version', 'license': 'License', 'summary': 'Summary', 'description': 'Description', 'classifiers': 'Classifier', } def _to_legacy(self): def process_entries(entries): reqts = set() for e in entries: extra = e.get('extra') env = e.get('environment') rlist = e['requires'] for r in rlist: if not env and not extra: reqts.add(r) else: marker = '' if extra: marker = 'extra == "%s"' % extra if env: if marker: marker = '(%s) and %s' % (env, marker) else: marker = env reqts.add(';'.join((r, marker))) return reqts assert self._data and not self._legacy result = LegacyMetadata() nmd = self._data for nk, ok in self.LEGACY_MAPPING.items(): if nk in nmd: result[ok] = nmd[nk] r1 = process_entries(self.run_requires + self.meta_requires) r2 = process_entries(self.build_requires + self.dev_requires) if self.extras: result['Provides-Extra'] = sorted(self.extras) result['Requires-Dist'] = sorted(r1) result['Setup-Requires-Dist'] = sorted(r2) # TODO: other fields such as contacts return result def write(self, path=None, fileobj=None, 
legacy=False, skip_unknown=True): if [path, fileobj].count(None) != 1: raise ValueError('Exactly one of path and fileobj is needed') self.validate() if legacy: if self._legacy: legacy_md = self._legacy else: legacy_md = self._to_legacy() if path: legacy_md.write(path, skip_unknown=skip_unknown) else: legacy_md.write_file(fileobj, skip_unknown=skip_unknown) else: if self._legacy: d = self._from_legacy() else: d = self._data if fileobj: json.dump(d, fileobj, ensure_ascii=True, indent=2, sort_keys=True) else: with codecs.open(path, 'w', 'utf-8') as f: json.dump(d, f, ensure_ascii=True, indent=2, sort_keys=True) def add_requirements(self, requirements): if self._legacy: self._legacy.add_requirements(requirements) else: run_requires = self._data.setdefault('run_requires', []) always = None for entry in run_requires: if 'environment' not in entry and 'extra' not in entry: always = entry break if always is None: always = { 'requires': requirements } run_requires.insert(0, always) else: rset = set(always['requires']) | set(requirements) always['requires'] = sorted(rset) def __repr__(self): name = self.name or '(no name)' version = self.version or 'no version' return '<%s %s %s (%s)>' % (self.__class__.__name__, self.metadata_version, name, version) PK.e[ȷE**distlib/resources.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2013-2016 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # from __future__ import unicode_literals import bisect import io import logging import os import pkgutil import shutil import sys import types import zipimport from . import DistlibException from .util import cached_property, get_cache_base, path_to_cache_dir, Cache logger = logging.getLogger(__name__) cache = None # created when needed class ResourceCache(Cache): def __init__(self, base=None): if base is None: # Use native string to avoid issues on 2.x: see Python #20140. 
base = os.path.join(get_cache_base(), str('resource-cache')) super(ResourceCache, self).__init__(base) def is_stale(self, resource, path): """ Is the cache stale for the given resource? :param resource: The :class:`Resource` being cached. :param path: The path of the resource in the cache. :return: True if the cache is stale. """ # Cache invalidation is a hard problem :-) return True def get(self, resource): """ Get a resource into the cache, :param resource: A :class:`Resource` instance. :return: The pathname of the resource in the cache. """ prefix, path = resource.finder.get_cache_info(resource) if prefix is None: result = path else: result = os.path.join(self.base, self.prefix_to_dir(prefix), path) dirname = os.path.dirname(result) if not os.path.isdir(dirname): os.makedirs(dirname) if not os.path.exists(result): stale = True else: stale = self.is_stale(resource, path) if stale: # write the bytes of the resource to the cache location with open(result, 'wb') as f: f.write(resource.bytes) return result class ResourceBase(object): def __init__(self, finder, name): self.finder = finder self.name = name class Resource(ResourceBase): """ A class representing an in-package resource, such as a data file. This is not normally instantiated by user code, but rather by a :class:`ResourceFinder` which manages the resource. """ is_container = False # Backwards compatibility def as_stream(self): """ Get the resource as a stream. This is not a property to make it obvious that it returns a new stream each time. 
""" return self.finder.get_stream(self) @cached_property def file_path(self): global cache if cache is None: cache = ResourceCache() return cache.get(self) @cached_property def bytes(self): return self.finder.get_bytes(self) @cached_property def size(self): return self.finder.get_size(self) class ResourceContainer(ResourceBase): is_container = True # Backwards compatibility @cached_property def resources(self): return self.finder.get_resources(self) class ResourceFinder(object): """ Resource finder for file system resources. """ if sys.platform.startswith('java'): skipped_extensions = ('.pyc', '.pyo', '.class') else: skipped_extensions = ('.pyc', '.pyo') def __init__(self, module): self.module = module self.loader = getattr(module, '__loader__', None) self.base = os.path.dirname(getattr(module, '__file__', '')) def _adjust_path(self, path): return os.path.realpath(path) def _make_path(self, resource_name): # Issue #50: need to preserve type of path on Python 2.x # like os.path._get_sep if isinstance(resource_name, bytes): # should only happen on 2.x sep = b'/' else: sep = '/' parts = resource_name.split(sep) parts.insert(0, self.base) result = os.path.join(*parts) return self._adjust_path(result) def _find(self, path): return os.path.exists(path) def get_cache_info(self, resource): return None, resource.path def find(self, resource_name): path = self._make_path(resource_name) if not self._find(path): result = None else: if self._is_directory(path): result = ResourceContainer(self, resource_name) else: result = Resource(self, resource_name) result.path = path return result def get_stream(self, resource): return open(resource.path, 'rb') def get_bytes(self, resource): with open(resource.path, 'rb') as f: return f.read() def get_size(self, resource): return os.path.getsize(resource.path) def get_resources(self, resource): def allowed(f): return (f != '__pycache__' and not f.endswith(self.skipped_extensions)) return set([f for f in os.listdir(resource.path) if 
allowed(f)]) def is_container(self, resource): return self._is_directory(resource.path) _is_directory = staticmethod(os.path.isdir) def iterator(self, resource_name): resource = self.find(resource_name) if resource is not None: todo = [resource] while todo: resource = todo.pop(0) yield resource if resource.is_container: rname = resource.name for name in resource.resources: if not rname: new_name = name else: new_name = '/'.join([rname, name]) child = self.find(new_name) if child.is_container: todo.append(child) else: yield child class ZipResourceFinder(ResourceFinder): """ Resource finder for resources in .zip files. """ def __init__(self, module): super(ZipResourceFinder, self).__init__(module) archive = self.loader.archive self.prefix_len = 1 + len(archive) # PyPy doesn't have a _files attr on zipimporter, and you can't set one if hasattr(self.loader, '_files'): self._files = self.loader._files else: self._files = zipimport._zip_directory_cache[archive] self.index = sorted(self._files) def _adjust_path(self, path): return path def _find(self, path): path = path[self.prefix_len:] if path in self._files: result = True else: if path and path[-1] != os.sep: path = path + os.sep i = bisect.bisect(self.index, path) try: result = self.index[i].startswith(path) except IndexError: result = False if not result: logger.debug('_find failed: %r %r', path, self.loader.prefix) else: logger.debug('_find worked: %r %r', path, self.loader.prefix) return result def get_cache_info(self, resource): prefix = self.loader.archive path = resource.path[1 + len(prefix):] return prefix, path def get_bytes(self, resource): return self.loader.get_data(resource.path) def get_stream(self, resource): return io.BytesIO(self.get_bytes(resource)) def get_size(self, resource): path = resource.path[self.prefix_len:] return self._files[path][3] def get_resources(self, resource): path = resource.path[self.prefix_len:] if path and path[-1] != os.sep: path += os.sep plen = len(path) result = set() i = 
bisect.bisect(self.index, path) while i < len(self.index): if not self.index[i].startswith(path): break s = self.index[i][plen:] result.add(s.split(os.sep, 1)[0]) # only immediate children i += 1 return result def _is_directory(self, path): path = path[self.prefix_len:] if path and path[-1] != os.sep: path += os.sep i = bisect.bisect(self.index, path) try: result = self.index[i].startswith(path) except IndexError: result = False return result _finder_registry = { type(None): ResourceFinder, zipimport.zipimporter: ZipResourceFinder } try: # In Python 3.6, _frozen_importlib -> _frozen_importlib_external try: import _frozen_importlib_external as _fi except ImportError: import _frozen_importlib as _fi _finder_registry[_fi.SourceFileLoader] = ResourceFinder _finder_registry[_fi.FileFinder] = ResourceFinder del _fi except (ImportError, AttributeError): pass def register_finder(loader, finder_maker): _finder_registry[type(loader)] = finder_maker _finder_cache = {} def finder(package): """ Return a resource finder for a package. :param package: The name of the package. :return: A :class:`ResourceFinder` instance for the package. """ if package in _finder_cache: result = _finder_cache[package] else: if package not in sys.modules: __import__(package) module = sys.modules[package] path = getattr(module, '__path__', None) if path is None: raise DistlibException('You cannot get a finder for a module, ' 'only for a package') loader = getattr(module, '__loader__', None) finder_maker = _finder_registry.get(type(loader)) if finder_maker is None: raise DistlibException('Unable to locate finder for %r' % package) result = finder_maker(module) _finder_cache[package] = result return result _dummy_module = types.ModuleType(str('__dummy__')) def finder_for_path(path): """ Return a resource finder for a path, which should represent a container. :param path: The path. :return: A :class:`ResourceFinder` instance for the path. 
""" result = None # calls any path hooks, gets importer into cache pkgutil.get_importer(path) loader = sys.path_importer_cache.get(path) finder = _finder_registry.get(type(loader)) if finder: module = _dummy_module module.__file__ = os.path.join(path, '') module.__loader__ = loader result = finder(module) return result PK.e[|j$$distlib/util.pycnu[ abc@sddlZddlmZddlZddlZddlmZddlZddl Z ddl Z ddl Z ddl Z ddl Z ddlZddlZyddlZWnek rdZnXddlZddlZddlZddlZddlZyddlZWnek r9ddlZnXddlZddlmZddlmZmZmZm Z m!Z!m"Z"m#Z#m$Z$m%Z%m&Z&m'Z'm(Z(m)Z)m*Z*m+Z+m,Z,m-Z-m.Z.m/Z/m0Z0e j1e2Z3dZ4e j5e4Z6dZ7d e7d Z8e7d Z9d Z:d e:de9de4d e:de9dZ;dZ<de;de<de;dZ=e8d e4e8dZ>de>dZ?de7de?de=dZ@e j5e@ZAde:de9d ZBe j5eBZCdZDd ZEd!ZFd"ZGddd#ZHd$ZId%ZJd&ZKejLd'ZMejLd(ZNejLd)d*ZOd+ePfd,YZQd-ZRd.ePfd/YZSd0ZTd1ePfd2YZUe j5d3e jVZWd4ZXdd5ZYd6ZZd7Z[d8Z\d9Z]d:Z^e j5d;e j_Z`e j5d<Zadd=Zbe j5d>Zcd?Zdd@ZedAZfdBZgdCZhdDZidEePfdFYZjdGePfdHYZkdIePfdJYZldZmdendRZodSZpdZqdZePfd[YZre j5d\Zse j5d]Zte j5d^Zud_Zd`ZverddalmwZxmyZymzZzdbe%j{fdcYZ{ddexfdeYZwdfewe(fdgYZ|nej}dh Z~e~dkr dje%jfdkYZer dle%jfdmYZq ndne&jfdoYZerFdpe&jfdqYZndre&jfdsYZdtZduePfdvYZdwefdxYZdyefdzYZd{e)fd|YZd}ePfd~YZdZdS(iN(tdeque(tiglobi(tDistlibException(t string_typest text_typetshutilt raw_inputtStringIOtcache_from_sourceturlopenturljointhttplibt xmlrpclibt splittypet HTTPHandlertBaseConfiguratort valid_identt Containert configparsertURLErrortZipFiletfsdecodetunquotes\s*,\s*s (\w|[.-])+s(\*|:(\*|\w+):|t)s\*?s([<>=!~]=)|[<>]t(s)?\s*(s)(s)\s*(s))*s(from\s+(?P.*))s \(\s*(?Pt|s)\s*\)|(?Ps\s*)s)*s \[\s*(?Ps)?\s*\]s(?Ps \s*)?(\s*s)?$s(?Ps )\s*(?Pc Cskd}d}tj|}|rg|j}|d}|dpK|d}|dsad}nd}|dj}|sd}d}|d} n{|ddkrd |}ntj|} g| D]}||^q}d |d jg|D]} d | ^qf} |d s$d} ntj |d } t d|d|d| d| d|d|}n|S(NcSs|j}|d|dfS(Ntoptvn(t groupdict(tmtd((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_constraintYs tdntc1tc2tdireftis<>!=s~=s%s (%s)s, s%s %stextnamet constraintstextrast requirementtsourceturl( 
tNonetREQUIREMENT_REtmatchRtstriptRELOP_IDENT_REtfinditertjointCOMMA_REtsplitR( tsRtresultRRR&tconsR+tconstrtrstiteratortconR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytparse_requirementWs4       0  cCsd}i}x|D]\}}}tjj||}xt|D]}tjj||} xt| D]v} ||| } |dkr|j| dqo||| } |jtjjdjd} | d| || RAtrstrip(tresources_roottrulesRGt destinationsRFtsuffixtdesttprefixtabs_basetabs_globtabs_patht resource_filetrel_pathtrel_dest((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_resources_dests|s  !cCs:ttdrt}ntjttdtjk}|S(Nt real_prefixt base_prefix(thasattrtsystTrueROtgetattr(R6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytin_venvs cCs7tjjtj}t|ts3t|}n|S(N(R?R@tnormcaseRZt executablet isinstanceRR(R6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_executables cCs|}xwtrt|}|}| r7|r7|}n|r |dj}||kr]Pn|r|d|||f}q|q q W|S(Nis %c: %s %s(R[Rtlower(tpromptt allowed_charst error_prompttdefaulttpR5tc((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytproceeds     cCsVt|tr|j}ni}x+|D]#}||kr+||||R$cCstjj|}||jkrtjj| r|jj|tjj|\}}|j|tj d||j stj |n|j r|j j|qndS(Ns Creating %s(R?R@RRRRR4RRRRtmkdirRR(RR@RR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRs"   cCst|| }tjd|||js|sD|j||r{|sSd}q{|j|sht|t|}nt j |||t n|j ||S(NsByte-compiling %s to %s( RRRRRR,RBRCRDt py_compiletcompileR[R(RR@toptimizetforceROtdpathtdiagpath((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt byte_compiles   cCstjj|rtjj|rtjj| rtjd||js`tj |n|j r ||j kr|j j |qq qtjj|rd}nd}tjd|||jstj |n|j r||j kr |j j |q qndS(NsRemoving directory tree at %stlinktfilesRemoving %s %s(R?R@RRRRtdebugRRRRRRR(RR@R5((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytensure_removeds"%     cCsjt}x]|setjj|r:tj|tj}Pntjj|}||kr\Pn|}q W|S(N(RR?R@RtaccesstW_OKR(RR@R6tparent((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt is_writables   cCs/|jst|j|jf}|j|S(sV Commit recorded changes, turn off recording, return changes. 
(RRCRRR(RR6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytcommits cCs|jsx9t|jD](}tjj|rtj|qqWt|jdt }xq|D]f}tj |}|r|dgkst tjj ||d}tj |ntj |qaWn|jdS(Ntreverset __pycache__i(RtlistRR?R@RRtsortedRR[tlistdirRCR2trmdirR(RRtdirsRtflisttsd((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytrollbacks  N(RRRRRRRR[RR,RRRRtset_executable_modeRRRRRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRQs             cCs|tjkrtj|}n t|}|dkr@|}nG|jd}t||jd}x|D]}t||}qnW|S(Nt.i(RZtmodulest __import__R,R4R\RH(t module_namet dotted_pathtmodR6tpartsRg((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytresolves    t ExportEntrycBs;eZdZedZdZdZejZRS(cCs(||_||_||_||_dS(N(R&RORMR(RR&RORMR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRs   cCst|j|jS(N(R RORM(R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRscCs d|j|j|j|jfS(Ns(R&RORMR(R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt__repr__!scCsdt|tst}nH|j|jko]|j|jko]|j|jko]|j|jk}|S(N(R`R RR&RORMR(RtotherR6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt__eq__%s ( RRRRRR RRt__hash__(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR s    s(?P(\w|[-.+])+) \s*=\s*(?P(\w+)([:\.]\w+)*) \s*(\[\s*(?P\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? c CsStj|}|sId}d|ks3d|krOtd|qOn|j}|d}|d}|jd}|dkr|d}}n4|dkrtd|n|jd\}}|d } | dkrd|ksd|kr td|ng} n(g| jd D]} | j^q"} t|||| }|S( Nt[t]sInvalid specification '%s'R&tcallablet:iiRt,( tENTRY_REtsearchR,RRtcountR4R/R ( t specificationRR6RR&R@tcolonsRORMRR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR{7s2          (cCs|d krd}ntjdkrHdtjkrHtjjd}ntjjd}tjj|rtj|tj }|st j d|qnGytj |t }Wn-tk rt j d|dt t}nX|s tj}t j d |ntjj||S( s Return the default base location for distlib caches. If the directory does not exist, it is created. Use the suffix provided for the base directory, and default to '.distlib' if it isn't provided. 
On Windows, if LOCALAPPDATA is defined in the environment, then it is assumed to be a directory, and will be the parent directory of the result. On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home directory - using os.expanduser('~') - will be the parent directory of the result. The result is just the directory '.distlib' in the parent directory as determined above, or with the name specified with ``suffix``. s.distlibtntt LOCALAPPDATAs $localappdatat~s(Directory exists but is not writable: %ssUnable to create %stexc_infos#Default location unusable, using %sN(R,R?R&tenvironR@t expandvarst expanduserRRRRtwarningtmakedirsR[tOSErrorRRRR2(RMR6tusable((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_cache_baseVs&       cCs`tjjtjj|\}}|r?|jdd}n|jtjd}||dS(s Convert an absolute path to a directory name for use in a cache. The algorithm used is: #. On Windows, any ``':'`` in the drive is replaced with ``'---'``. #. Any occurrence of ``os.sep`` is replaced with ``'--'``. #. ``'.cache'`` is appended. 
Rs---s--s.cache(R?R@t splitdriveRR>RA(R@RRg((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytpath_to_cache_dirs $cCs|jds|dS|S(NR=(tendswith(R5((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt ensure_slashscCskd}}d|kr^|jdd\}}d|krC|}q^|jdd\}}n|||fS(Nt@iR(R,R4(tnetloctusernametpasswordRO((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytparse_credentialss    cCs tjd}tj||S(Ni(R?tumask(R6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_process_umasks cCsXt}d}x3t|D]%\}}t|tst}PqqW|dk sTt|S(N(R[R,t enumerateR`RRRC(tseqR6tiR5((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytis_string_sequencess3([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-([a-z0-9_.+-]+)s -py(\d\.?\d?)cCsd}d}t|jdd}tj|}|r[|jd}||j }n|rt|t|dkrtj tj |d|}|r|j }|| ||d|f}qn|dkrt j |}|r|jd|jd|f}qn|S(sw Extract name, version, python version from a filename (no extension) Return name, version, pyver or None t t-is\biN( R,RR>tPYTHON_VERSIONRRtstartRDtreR.tescapetendtPROJECT_NAME_AND_VERSION(tfilenamet project_nameR6tpyverRtn((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytsplit_filenames"" ! 's-(?P[\w .-]+)\s*\(\s*(?P[^\s)]+)\)$cCsRtj|}|s(td|n|j}|djj|dfS(s A utility method used to get name and version from a string. From e.g. a Provides-Dist value. :param p: A value in a form 'foo (1.0)' :return: The name and version as a tuple. 
s$Ill-formed name/version string: '%s'R&tver(tNAME_VERSION_RER.RRR/Rb(RgRR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytparse_name_and_versions  cCs t}t|pg}t|p'g}d|krS|jd||O}nx|D]}|dkr||j|qZ|jdr|d}||krtjd|n||kr|j|qqZ||krtjd|n|j|qZW|S(Nt*R6isundeclared extra: %s(RRRRBRR!(t requestedt availableR6trtunwanted((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt get_extrass&          cCsi}yqt|}|j}|jd}|jdsRtjd|n$tjd|}tj |}Wn&t k r}tj d||nX|S(Ns Content-Typesapplication/jsons(Unexpected response for JSON request: %ssutf-8s&Failed to get external data for %s: %s( R RtgetRBRRRuRvRxRyR|t exception(R+R6tresptheaderstcttreaderte((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt_get_external_datas  s'https://www.red-dove.com/pypi/projects/cCs9d|dj|f}tt|}t|}|S(Ns%s/%s/project.jsoni(tupperR t_external_data_base_urlRR(R&R+R6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_project_datas cCs6d|dj||f}tt|}t|S(Ns%s/%s/package-%s.jsoni(RSR RTRR(R&tversionR+((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytget_package_datastCachecBs)eZdZdZdZdZRS(s A class implementing a cache for resources that need to live in the file system e.g. shared libraries. This class was moved from resources to here because it could be used by other modules, e.g. the wheel module. cCsvtjj|s"tj|ntj|jd@dkrQtjd|ntjjtjj ||_ dS(su Initialise an instance. :param base: The base directory where the cache should be located. i?isDirectory '%s' is not privateN( R?R@RR"RRRR!RtnormpathRF(RRF((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR"s cCs t|S(sN Converts a resource prefix to a directory name in the cache. (R'(RRO((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt prefix_to_dir0scCsg}xtj|jD]}tjj|j|}yZtjj|s^tjj|rntj|n"tjj|rt j |nWqt k r|j |qXqW|S(s" Clear the cache. 
( R?RRFR@R2RRRRRRR|tappend(Rt not_removedtfn((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytclear6s$ (RRt__doc__RRZR^(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRXs  t EventMixincBs>eZdZdZedZdZdZdZRS(s1 A very simple publish/subscribe system. cCs i|_dS(N(t _subscribers(R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRKscCs\|j}||kr+t|g|| %s;s %s;t}s (RmR[RoR2(RR6RvRxRuRp((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytdot s    ( RRRRqRRsRRR{RtpropertyRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRls      3s.tar.gzs.tar.bz2s.tars.zips.tgzs.tbzs.whlc sfd}tjjtd}|dkr|jdrZd}q|jdrxd}d}q|jdrd }d }q|jd rd}d}qtd|nz|dkrt|d}|rZ|j}x|D]}||qWqZnBt j ||}|rZ|j }x|D]}||qCWn|dkrt j ddkrxA|jD]0} t| jts| jjd| _qqWn|jWd|r|jnXdS(Ncs|t|ts!|jd}ntjjtjj|}|j se|tjkrxt d|ndS(Nsutf-8spath outside destination: %r( R`RtdecodeR?R@RR2RBRAR(R@Rg(tdest_dirtplen(s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt check_paths !#s.zips.whltzips.tar.gzs.tgzttgzsr:gzs.tar.bz2s.tbzttbzsr:bz2s.tarttarRHsUnknown format for %riisutf-8(s.zips.whl(s.tar.gzs.tgz(s.tar.bz2s.tbz(R?R@RRDR,R(RRtnamelistttarfileRtgetnamesRZRtt getmembersR`R&RRt extractallR( tarchive_filenameRtformatRRtarchiveRtnamesR&ttarinfo((RRs</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt unarchivesH           c Cstj}t|}t|d}xutj|D]d\}}}xR|D]J}tjj||}||} tjj| |} |j|| qPWq:WWdQX|S(s*zip a directory tree into a BytesIO objectRN( tiotBytesIORDRR?twalkR@R2R( t directoryR6tdlentzftrootRRR&tfulltrelRN((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytzip_dirSs    R$tKtMtGtTtPtProgresscBseZdZdddZdZdZdZdZedZ ed Z d Z ed Z ed Z RS( tUNKNOWNiidcCsV|dks||kst||_|_||_d|_d|_t|_dS(Ni( R,RCRtcurtmaxtstartedtelapsedRtdone(Rtminvaltmaxval((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRjs    
cCs}|j|kst|jdks9||jks9t||_tj}|jdkri||_n||j|_dS(N(RRCRR,RttimeRR(Rtcurvaltnow((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytupdaters$   cCs*|dkst|j|j|dS(Ni(RCRR(Rtincr((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt increment|scCs|j|j|S(N(RR(R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR8scCs/|jdk r"|j|jnt|_dS(N(RR,RR[R(R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytstopscCs|jdkr|jS|jS(N(RR,tunknown(R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytmaximumscCsZ|jrd}nD|jdkr*d}n,d|j|j|j|j}d|}|S(Ns100 %s ?? %gY@s%3d %%(RRR,RR(RR6R((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt percentages   " cCsU|dkr|jdks-|j|jkr6d}ntjdtj|}|S(Nis??:??:??s%H:%M:%S(RR,RRRtstrftimetgmtime(RtdurationR6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytformat_durations- cCs|jrd}|j}nd}|jdkr9d}ne|jdksZ|j|jkrcd}n;t|j|j}||j|j:}|d|j}d||j|fS(NtDonesETA iiis%s: %s(RRRR,RRtfloatR(RROtt((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytETAs   ! 
cCsh|jdkrd}n|j|j|j}x(tD] }|dkrLPn|d:}q6Wd||fS(Nigig@@s%d %sB/s(RRRtUNITS(RR6tunit((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytspeeds   (RRRRRRR8RRRRRRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRgs    s \{([^}]*)\}s[^/\\,{]\*\*|\*\*[^/\\,}]s^[^{]*\}|\{[^}]*$cCsZtj|r(d}t||ntj|rPd}t||nt|S(sAExtended globbing function that supports ** and {opt1,opt2,opt3}.s7invalid glob %r: recursive glob "**" must be used alones2invalid glob %r: mismatching set marker '{' or '}'(t_CHECK_RECURSIVE_GLOBRRt_CHECK_MISMATCH_SETt_iglob(t path_globR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRsc cstj|d}t|dkrt|dksBt||\}}}x3|jdD]4}x+tdj|||fD] }|VqWqaWnd|krxt|D] }|VqWn|jdd\}}|dkrd}n|dkr d}n|jd}|jd }x]tj |D]L\}}} tj j |}x(ttj j||D] } | VqtWq7WdS( NiiRR$s**RRER=s\( t RICH_GLOBR4RDRCRR2t std_iglobRER?RR@RY( Rtrich_path_globRORRMtitemR@tradicaltdirRR]((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRs*%      "(t HTTPSHandlertmatch_hostnametCertificateErrortHTTPSConnectioncBseZdZeZdZRS(c Cstj|j|jf|j}t|dtrI||_|jnt t ds|j rmt j }n t j }t j||j|jd|dt jd|j |_nt jt j}|jt jO_|jr|j|j|jni}|j rHt j |_|jd|j tt dtrH|j|d!           N(RRR,RR[RR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRsRcBs&eZedZdZdZRS(cCs#tj|||_||_dS(N(tBaseHTTPSHandlerRRR(RRR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR#s  cOs7t||}|jr3|j|_|j|_n|S(s This is called to create a connection instance. Normally you'd pass a connection class to do_open, but it doesn't actually check for a class, and just expects a callable. As long as we behave just as a constructor would have, we should be OK. If it ever changes so that we *must* pass a class, we'll create an UnsafeHTTPSConnection class which just sets check_domain to False in the class definition, and choose which one to pass to do_open. 
(RRR(RRiRjR6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt _conn_maker(s   cCs_y|j|j|SWnAtk rZ}dt|jkrTtd|jq[nXdS(Nscertificate verify faileds*Unable to verify server certificate for %s(tdo_openRRtstrtreasonRR(RtreqRQ((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt https_open8s(RRR[RRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR"s  tHTTPSOnlyHandlercBseZdZRS(cCstd|dS(NsAUnexpected HTTP request on what should be a secure connection: %s(R(RR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyt http_openLs(RRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRKsiitHTTPcBseZdddZRS(R$cKs5|dkrd}n|j|j|||dS(Ni(R,t_setupt_connection_class(RRRRj((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRXs  N(RRR,R(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR WstHTTPScBseZdddZRS(R$cKs5|dkrd}n|j|j|||dS(Ni(R,R R (RRRRj((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR`s  N(RRR,R(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR _st TransportcBseZddZdZRS(icCs ||_tjj||dS(N(RR R R(RRt use_datetime((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRgs cCs|j|\}}}tdkr<t|d|j}nN|j sY||jdkr}||_|tj|f|_n|jd}|S(NiiRii(ii(t get_host_infot _ver_infoR Rt _connectiont_extra_headersR tHTTPConnection(RRthtehtx509R6((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pytmake_connectionks   (RRRR(((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyR fs t SafeTransportcBseZddZdZRS(icCs ||_tjj||dS(N(RR RR(RRR((s</usr/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyRxs cCs|j|\}}}|s'i}n|j|ds                      . %   /       )           ,H6 ] *)   :+PK.e[: üdistlib/database.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2012-2016 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. 
# """PEP 376 implementation.""" from __future__ import unicode_literals import base64 import codecs import contextlib import hashlib import logging import os import posixpath import sys import zipimport from . import DistlibException, resources from .compat import StringIO from .version import get_scheme, UnsupportedVersionError from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME from .util import (parse_requirement, cached_property, parse_name_and_version, read_exports, write_exports, CSVReader, CSVWriter) __all__ = ['Distribution', 'BaseInstalledDistribution', 'InstalledDistribution', 'EggInfoDistribution', 'DistributionPath'] logger = logging.getLogger(__name__) EXPORTS_FILENAME = 'pydist-exports.json' COMMANDS_FILENAME = 'pydist-commands.json' DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED', 'RESOURCES', EXPORTS_FILENAME, 'SHARED') DISTINFO_EXT = '.dist-info' class _Cache(object): """ A simple cache mapping names and .dist-info paths to distributions """ def __init__(self): """ Initialise an instance. There is normally one for each DistributionPath. """ self.name = {} self.path = {} self.generated = False def clear(self): """ Clear the cache, setting it to its initial state. """ self.name.clear() self.path.clear() self.generated = False def add(self, dist): """ Add a distribution to the cache. :param dist: The distribution to add. """ if dist.path not in self.path: self.path[dist.path] = dist self.name.setdefault(dist.key, []).append(dist) class DistributionPath(object): """ Represents a set of distributions installed on a path (typically sys.path). """ def __init__(self, path=None, include_egg=False): """ Create an instance from a path, optionally including legacy (distutils/ setuptools/distribute) distributions. :param path: The path to use, as a list of directories. If not specified, sys.path is used. 
:param include_egg: If True, this instance will look for and return legacy distributions as well as those based on PEP 376. """ if path is None: path = sys.path self.path = path self._include_dist = True self._include_egg = include_egg self._cache = _Cache() self._cache_egg = _Cache() self._cache_enabled = True self._scheme = get_scheme('default') def _get_cache_enabled(self): return self._cache_enabled def _set_cache_enabled(self, value): self._cache_enabled = value cache_enabled = property(_get_cache_enabled, _set_cache_enabled) def clear_cache(self): """ Clears the internal cache. """ self._cache.clear() self._cache_egg.clear() def _yield_distributions(self): """ Yield .dist-info and/or .egg(-info) distributions. """ # We need to check if we've seen some resources already, because on # some Linux systems (e.g. some Debian/Ubuntu variants) there are # symlinks which alias other files in the environment. seen = set() for path in self.path: finder = resources.finder_for_path(path) if finder is None: continue r = finder.find('') if not r or not r.is_container: continue rset = sorted(r.resources) for entry in rset: r = finder.find(entry) if not r or r.path in seen: continue if self._include_dist and entry.endswith(DISTINFO_EXT): possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME] for metadata_filename in possible_filenames: metadata_path = posixpath.join(entry, metadata_filename) pydist = finder.find(metadata_path) if pydist: break else: continue with contextlib.closing(pydist.as_stream()) as stream: metadata = Metadata(fileobj=stream, scheme='legacy') logger.debug('Found %s', r.path) seen.add(r.path) yield new_dist_class(r.path, metadata=metadata, env=self) elif self._include_egg and entry.endswith(('.egg-info', '.egg')): logger.debug('Found %s', r.path) seen.add(r.path) yield old_dist_class(r.path, self) def _generate_cache(self): """ Scan the path for distributions and populate the cache with those that are found. 
""" gen_dist = not self._cache.generated gen_egg = self._include_egg and not self._cache_egg.generated if gen_dist or gen_egg: for dist in self._yield_distributions(): if isinstance(dist, InstalledDistribution): self._cache.add(dist) else: self._cache_egg.add(dist) if gen_dist: self._cache.generated = True if gen_egg: self._cache_egg.generated = True @classmethod def distinfo_dirname(cls, name, version): """ The *name* and *version* parameters are converted into their filename-escaped form, i.e. any ``'-'`` characters are replaced with ``'_'`` other than the one in ``'dist-info'`` and the one separating the name from the version number. :parameter name: is converted to a standard distribution name by replacing any runs of non- alphanumeric characters with a single ``'-'``. :type name: string :parameter version: is converted to a standard version string. Spaces become dots, and all other non-alphanumeric characters (except dots) become dashes, with runs of multiple dashes condensed to a single dash. :type version: string :returns: directory name :rtype: string""" name = name.replace('-', '_') return '-'.join([name, version]) + DISTINFO_EXT def get_distributions(self): """ Provides an iterator that looks for distributions and returns :class:`InstalledDistribution` or :class:`EggInfoDistribution` instances for each one of them. :rtype: iterator of :class:`InstalledDistribution` and :class:`EggInfoDistribution` instances """ if not self._cache_enabled: for dist in self._yield_distributions(): yield dist else: self._generate_cache() for dist in self._cache.path.values(): yield dist if self._include_egg: for dist in self._cache_egg.path.values(): yield dist def get_distribution(self, name): """ Looks for a named distribution on the path. This function only returns the first result found, as no more than one value is expected. If nothing is found, ``None`` is returned. 
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` or ``None`` """ result = None name = name.lower() if not self._cache_enabled: for dist in self._yield_distributions(): if dist.key == name: result = dist break else: self._generate_cache() if name in self._cache.name: result = self._cache.name[name][0] elif self._include_egg and name in self._cache_egg.name: result = self._cache_egg.name[name][0] return result def provides_distribution(self, name, version=None): """ Iterates over all distributions to find which distributions provide *name*. If a *version* is provided, it will be used to filter the results. This function only returns the first result found, since no more than one values are expected. If the directory is not found, returns ``None``. :parameter version: a version specifier that indicates the version required, conforming to the format in ``PEP-345`` :type name: string :type version: string """ matcher = None if not version is None: try: matcher = self._scheme.matcher('%s (%s)' % (name, version)) except ValueError: raise DistlibException('invalid name or version: %r, %r' % (name, version)) for dist in self.get_distributions(): provided = dist.provides for p in provided: p_name, p_ver = parse_name_and_version(p) if matcher is None: if p_name == name: yield dist break else: if p_name == name and matcher.match(p_ver): yield dist break def get_file_path(self, name, relative_path): """ Return the path to a resource file. """ dist = self.get_distribution(name) if dist is None: raise LookupError('no distribution named %r found' % name) return dist.get_resource_path(relative_path) def get_exported_entries(self, category, name=None): """ Return all of the exported entries in a particular category. :param category: The category to search for entries. :param name: If specified, only entries with that name are returned. 
""" for dist in self.get_distributions(): r = dist.exports if category in r: d = r[category] if name is not None: if name in d: yield d[name] else: for v in d.values(): yield v class Distribution(object): """ A base class for distributions, whether installed or from indexes. Either way, it must have some metadata, so that's all that's needed for construction. """ build_time_dependency = False """ Set to True if it's known to be only a build-time dependency (i.e. not needed after installation). """ requested = False """A boolean that indicates whether the ``REQUESTED`` metadata file is present (in other words, whether the package was installed by user request or it was installed as a dependency).""" def __init__(self, metadata): """ Initialise an instance. :param metadata: The instance of :class:`Metadata` describing this distribution. """ self.metadata = metadata self.name = metadata.name self.key = self.name.lower() # for case-insensitive comparisons self.version = metadata.version self.locator = None self.digest = None self.extras = None # additional features requested self.context = None # environment marker overrides self.download_urls = set() self.digests = {} @property def source_url(self): """ The source archive download URL for this distribution. """ return self.metadata.source_url download_url = source_url # Backward compatibility @property def name_and_version(self): """ A utility property which displays the name and version in parentheses. """ return '%s (%s)' % (self.name, self.version) @property def provides(self): """ A set of distribution names and versions provided by this distribution. :return: A set of "name (version)" strings. 
""" plist = self.metadata.provides s = '%s (%s)' % (self.name, self.version) if s not in plist: plist.append(s) return plist def _get_requirements(self, req_attr): md = self.metadata logger.debug('Getting requirements from metadata %r', md.todict()) reqts = getattr(md, req_attr) return set(md.get_requirements(reqts, extras=self.extras, env=self.context)) @property def run_requires(self): return self._get_requirements('run_requires') @property def meta_requires(self): return self._get_requirements('meta_requires') @property def build_requires(self): return self._get_requirements('build_requires') @property def test_requires(self): return self._get_requirements('test_requires') @property def dev_requires(self): return self._get_requirements('dev_requires') def matches_requirement(self, req): """ Say if this instance matches (fulfills) a requirement. :param req: The requirement to match. :rtype req: str :return: True if it matches, else False. """ # Requirement may contain extras - parse to lose those # from what's passed to the matcher r = parse_requirement(req) scheme = get_scheme(self.metadata.scheme) try: matcher = scheme.matcher(r.requirement) except UnsupportedVersionError: # XXX compat-mode if cannot read the version logger.warning('could not read version %r - using name only', req) name = req.split()[0] matcher = scheme.matcher(name) name = matcher.key # case-insensitive result = False for p in self.provides: p_name, p_ver = parse_name_and_version(p) if p_name != name: continue try: result = matcher.match(p_ver) break except UnsupportedVersionError: pass return result def __repr__(self): """ Return a textual representation of this instance, """ if self.source_url: suffix = ' [%s]' % self.source_url else: suffix = '' return '' % (self.name, self.version, suffix) def __eq__(self, other): """ See if this distribution is the same as another. :param other: The distribution to compare with. To be equal to one another. 
distributions must have the same type, name, version and source_url. :return: True if it is the same, else False. """ if type(other) is not type(self): result = False else: result = (self.name == other.name and self.version == other.version and self.source_url == other.source_url) return result def __hash__(self): """ Compute hash in a way which matches the equality test. """ return hash(self.name) + hash(self.version) + hash(self.source_url) class BaseInstalledDistribution(Distribution): """ This is the base class for installed distributions (whether PEP 376 or legacy). """ hasher = None def __init__(self, metadata, path, env=None): """ Initialise an instance. :param metadata: An instance of :class:`Metadata` which describes the distribution. This will normally have been initialised from a metadata file in the ``path``. :param path: The path of the ``.dist-info`` or ``.egg-info`` directory for the distribution. :param env: This is normally the :class:`DistributionPath` instance where this distribution was found. """ super(BaseInstalledDistribution, self).__init__(metadata) self.path = path self.dist_path = env def get_hash(self, data, hasher=None): """ Get the hash of some data, using a particular hash algorithm, if specified. :param data: The data to be hashed. :type data: bytes :param hasher: The name of a hash implementation, supported by hashlib, or ``None``. Examples of valid values are ``'sha1'``, ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and ``'sha512'``. If no hasher is specified, the ``hasher`` attribute of the :class:`InstalledDistribution` instance is used. If the hasher is determined to be ``None``, MD5 is used as the hashing algorithm. :returns: The hash of the data. If a hasher was explicitly specified, the returned hash will be prefixed with the specified hasher followed by '='. 
:rtype: str """ if hasher is None: hasher = self.hasher if hasher is None: hasher = hashlib.md5 prefix = '' else: hasher = getattr(hashlib, hasher) prefix = '%s=' % self.hasher digest = hasher(data).digest() digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii') return '%s%s' % (prefix, digest) class InstalledDistribution(BaseInstalledDistribution): """ Created with the *path* of the ``.dist-info`` directory provided to the constructor. It reads the metadata contained in ``pydist.json`` when it is instantiated., or uses a passed in Metadata instance (useful for when dry-run mode is being used). """ hasher = 'sha256' def __init__(self, path, metadata=None, env=None): self.finder = finder = resources.finder_for_path(path) if finder is None: import pdb; pdb.set_trace () if env and env._cache_enabled and path in env._cache.path: metadata = env._cache.path[path].metadata elif metadata is None: r = finder.find(METADATA_FILENAME) # Temporary - for Wheel 0.23 support if r is None: r = finder.find(WHEEL_METADATA_FILENAME) # Temporary - for legacy support if r is None: r = finder.find('METADATA') if r is None: raise ValueError('no %s found in %s' % (METADATA_FILENAME, path)) with contextlib.closing(r.as_stream()) as stream: metadata = Metadata(fileobj=stream, scheme='legacy') super(InstalledDistribution, self).__init__(metadata, path, env) if env and env._cache_enabled: env._cache.add(self) try: r = finder.find('REQUESTED') except AttributeError: import pdb; pdb.set_trace () self.requested = r is not None def __repr__(self): return '' % ( self.name, self.version, self.path) def __str__(self): return "%s %s" % (self.name, self.version) def _get_records(self): """ Get the list of installed files for the distribution :return: A list of tuples of path, hash and size. Note that hash and size might be ``None`` for some entries. The path is exactly as stored in the file (which is as in PEP 376). 
""" results = [] r = self.get_distinfo_resource('RECORD') with contextlib.closing(r.as_stream()) as stream: with CSVReader(stream=stream) as record_reader: # Base location is parent dir of .dist-info dir #base_location = os.path.dirname(self.path) #base_location = os.path.abspath(base_location) for row in record_reader: missing = [None for i in range(len(row), 3)] path, checksum, size = row + missing #if not os.path.isabs(path): # path = path.replace('/', os.sep) # path = os.path.join(base_location, path) results.append((path, checksum, size)) return results @cached_property def exports(self): """ Return the information exported by this distribution. :return: A dictionary of exports, mapping an export category to a dict of :class:`ExportEntry` instances describing the individual export entries, and keyed by name. """ result = {} r = self.get_distinfo_resource(EXPORTS_FILENAME) if r: result = self.read_exports() return result def read_exports(self): """ Read exports data from a file in .ini format. :return: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. """ result = {} r = self.get_distinfo_resource(EXPORTS_FILENAME) if r: with contextlib.closing(r.as_stream()) as stream: result = read_exports(stream) return result def write_exports(self, exports): """ Write a dictionary of exports to a file in .ini format. :param exports: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. """ rf = self.get_distinfo_file(EXPORTS_FILENAME) with open(rf, 'w') as f: write_exports(exports, f) def get_resource_path(self, relative_path): """ NOTE: This API may change in the future. Return the absolute path to a resource file with the given relative path. :param relative_path: The path, relative to .dist-info, of the resource of interest. :return: The absolute path where the resource is to be found. 
""" r = self.get_distinfo_resource('RESOURCES') with contextlib.closing(r.as_stream()) as stream: with CSVReader(stream=stream) as resources_reader: for relative, destination in resources_reader: if relative == relative_path: return destination raise KeyError('no resource file with relative path %r ' 'is installed' % relative_path) def list_installed_files(self): """ Iterates over the ``RECORD`` entries and returns a tuple ``(path, hash, size)`` for each line. :returns: iterator of (path, hash, size) """ for result in self._get_records(): yield result def write_installed_files(self, paths, prefix, dry_run=False): """ Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any existing ``RECORD`` file is silently overwritten. prefix is used to determine when to write absolute paths. """ prefix = os.path.join(prefix, '') base = os.path.dirname(self.path) base_under_prefix = base.startswith(prefix) base = os.path.join(base, '') record_path = self.get_distinfo_file('RECORD') logger.info('creating %s', record_path) if dry_run: return None with CSVWriter(record_path) as writer: for path in paths: if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')): # do not put size and hash, as in PEP-376 hash_value = size = '' else: size = '%d' % os.path.getsize(path) with open(path, 'rb') as fp: hash_value = self.get_hash(fp.read()) if path.startswith(base) or (base_under_prefix and path.startswith(prefix)): path = os.path.relpath(path, base) writer.writerow((path, hash_value, size)) # add the RECORD file itself if record_path.startswith(base): record_path = os.path.relpath(record_path, base) writer.writerow((record_path, '', '')) return record_path def check_installed_files(self): """ Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. 
        Each entry in the mismatch list will be a tuple consisting of the
        path, 'exists', 'size' or 'hash' according to what didn't match
        (existence is checked first, then size, then hash), the expected
        value and the actual value.
        """
        mismatches = []
        base = os.path.dirname(self.path)
        record_path = self.get_distinfo_file('RECORD')
        for path, hash_value, size in self.list_installed_files():
            # RECORD stores paths relative to the site directory; make
            # them absolute before probing the filesystem.
            if not os.path.isabs(path):
                path = os.path.join(base, path)
            if path == record_path:
                # RECORD's own entry has no hash/size to verify; skip it.
                continue
            if not os.path.exists(path):
                mismatches.append((path, 'exists', True, False))
            elif os.path.isfile(path):
                actual_size = str(os.path.getsize(path))
                if size and actual_size != size:
                    mismatches.append((path, 'size', size, actual_size))
                elif hash_value:
                    # An entry of the form 'hasher=digest' names the
                    # algorithm explicitly; a bare digest means the
                    # default algorithm was used when RECORD was written.
                    if '=' in hash_value:
                        hasher = hash_value.split('=', 1)[0]
                    else:
                        hasher = None

                    with open(path, 'rb') as f:
                        actual_hash = self.get_hash(f.read(), hasher)
                        if actual_hash != hash_value:
                            mismatches.append((path, 'hash', hash_value,
                                               actual_hash))
        return mismatches

    @cached_property
    def shared_locations(self):
        """
        A dictionary of shared locations whose keys are in the set 'prefix',
        'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
        The corresponding value is the absolute path of that category for
        this distribution, and takes into account any paths selected by the
        user at installation time (e.g. via command-line arguments). In the
        case of the 'namespace' key, this would be a list of absolute paths
        for the roots of namespace packages in this distribution.

        The first time this property is accessed, the relevant information is
        read from the SHARED file in the .dist-info directory.
""" result = {} shared_path = os.path.join(self.path, 'SHARED') if os.path.isfile(shared_path): with codecs.open(shared_path, 'r', encoding='utf-8') as f: lines = f.read().splitlines() for line in lines: key, value = line.split('=', 1) if key == 'namespace': result.setdefault(key, []).append(value) else: result[key] = value return result def write_shared_locations(self, paths, dry_run=False): """ Write shared location information to the SHARED file in .dist-info. :param paths: A dictionary as described in the documentation for :meth:`shared_locations`. :param dry_run: If True, the action is logged but no file is actually written. :return: The path of the file written to. """ shared_path = os.path.join(self.path, 'SHARED') logger.info('creating %s', shared_path) if dry_run: return None lines = [] for key in ('prefix', 'lib', 'headers', 'scripts', 'data'): path = paths[key] if os.path.isdir(paths[key]): lines.append('%s=%s' % (key, path)) for ns in paths.get('namespace', ()): lines.append('namespace=%s' % ns) with codecs.open(shared_path, 'w', encoding='utf-8') as f: f.write('\n'.join(lines)) return shared_path def get_distinfo_resource(self, path): if path not in DIST_FILES: raise DistlibException('invalid path for a dist-info file: ' '%r at %r' % (path, self.path)) finder = resources.finder_for_path(self.path) if finder is None: raise DistlibException('Unable to get a finder for %s' % self.path) return finder.find(path) def get_distinfo_file(self, path): """ Returns a path located under the ``.dist-info`` directory. Returns a string representing the path. :parameter path: a ``'/'``-separated path relative to the ``.dist-info`` directory or an absolute path; If *path* is an absolute path and doesn't start with the ``.dist-info`` directory path, a :class:`DistlibException` is raised :type path: str :rtype: str """ # Check if it is an absolute path # XXX use relpath, add tests if path.find(os.sep) >= 0: # it's an absolute path? 
distinfo_dirname, path = path.split(os.sep)[-2:] if distinfo_dirname != self.path.split(os.sep)[-1]: raise DistlibException( 'dist-info file %r does not belong to the %r %s ' 'distribution' % (path, self.name, self.version)) # The file must be relative if path not in DIST_FILES: raise DistlibException('invalid path for a dist-info file: ' '%r at %r' % (path, self.path)) return os.path.join(self.path, path) def list_distinfo_files(self): """ Iterates over the ``RECORD`` entries and returns paths for each line if the path is pointing to a file located in the ``.dist-info`` directory or one of its subdirectories. :returns: iterator of paths """ base = os.path.dirname(self.path) for path, checksum, size in self._get_records(): # XXX add separator or use real relpath algo if not os.path.isabs(path): path = os.path.join(base, path) if path.startswith(self.path): yield path def __eq__(self, other): return (isinstance(other, InstalledDistribution) and self.path == other.path) # See http://docs.python.org/reference/datamodel#object.__hash__ __hash__ = object.__hash__ class EggInfoDistribution(BaseInstalledDistribution): """Created with the *path* of the ``.egg-info`` directory or file provided to the constructor. 
It reads the metadata contained in the file itself, or if the given path happens to be a directory, the metadata is read from the file ``PKG-INFO`` under that directory.""" requested = True # as we have no way of knowing, assume it was shared_locations = {} def __init__(self, path, env=None): def set_name_and_version(s, n, v): s.name = n s.key = n.lower() # for case-insensitive comparisons s.version = v self.path = path self.dist_path = env if env and env._cache_enabled and path in env._cache_egg.path: metadata = env._cache_egg.path[path].metadata set_name_and_version(self, metadata.name, metadata.version) else: metadata = self._get_metadata(path) # Need to be set before caching set_name_and_version(self, metadata.name, metadata.version) if env and env._cache_enabled: env._cache_egg.add(self) super(EggInfoDistribution, self).__init__(metadata, path, env) def _get_metadata(self, path): requires = None def parse_requires_data(data): """Create a list of dependencies from a requires.txt file. *data*: the contents of a setuptools-produced requires.txt file. """ reqs = [] lines = data.splitlines() for line in lines: line = line.strip() if line.startswith('['): logger.warning('Unexpected line: quitting requirement scan: %r', line) break r = parse_requirement(line) if not r: logger.warning('Not recognised as a requirement: %r', line) continue if r.extras: logger.warning('extra requirements in requires.txt are ' 'not supported') if not r.constraints: reqs.append(r.name) else: cons = ', '.join('%s%s' % c for c in r.constraints) reqs.append('%s (%s)' % (r.name, cons)) return reqs def parse_requires_path(req_path): """Create a list of dependencies from a requires.txt file. *req_path*: the path to a setuptools-produced requires.txt file. 
""" reqs = [] try: with codecs.open(req_path, 'r', 'utf-8') as fp: reqs = parse_requires_data(fp.read()) except IOError: pass return reqs if path.endswith('.egg'): if os.path.isdir(path): meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO') metadata = Metadata(path=meta_path, scheme='legacy') req_path = os.path.join(path, 'EGG-INFO', 'requires.txt') requires = parse_requires_path(req_path) else: # FIXME handle the case where zipfile is not available zipf = zipimport.zipimporter(path) fileobj = StringIO( zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8')) metadata = Metadata(fileobj=fileobj, scheme='legacy') try: data = zipf.get_data('EGG-INFO/requires.txt') requires = parse_requires_data(data.decode('utf-8')) except IOError: requires = None elif path.endswith('.egg-info'): if os.path.isdir(path): req_path = os.path.join(path, 'requires.txt') requires = parse_requires_path(req_path) path = os.path.join(path, 'PKG-INFO') metadata = Metadata(path=path, scheme='legacy') else: raise DistlibException('path must end with .egg-info or .egg, ' 'got %r' % path) if requires: metadata.add_requirements(requires) return metadata def __repr__(self): return '' % ( self.name, self.version, self.path) def __str__(self): return "%s %s" % (self.name, self.version) def check_installed_files(self): """ Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value. 
""" mismatches = [] record_path = os.path.join(self.path, 'installed-files.txt') if os.path.exists(record_path): for path, _, _ in self.list_installed_files(): if path == record_path: continue if not os.path.exists(path): mismatches.append((path, 'exists', True, False)) return mismatches def list_installed_files(self): """ Iterates over the ``installed-files.txt`` entries and returns a tuple ``(path, hash, size)`` for each line. :returns: a list of (path, hash, size) """ def _md5(path): f = open(path, 'rb') try: content = f.read() finally: f.close() return hashlib.md5(content).hexdigest() def _size(path): return os.stat(path).st_size record_path = os.path.join(self.path, 'installed-files.txt') result = [] if os.path.exists(record_path): with codecs.open(record_path, 'r', encoding='utf-8') as f: for line in f: line = line.strip() p = os.path.normpath(os.path.join(self.path, line)) # "./" is present as a marker between installed files # and installation metadata files if not os.path.exists(p): logger.warning('Non-existent file: %s', p) if p.endswith(('.pyc', '.pyo')): continue #otherwise fall through and fail if not os.path.isdir(p): result.append((p, _md5(p), _size(p))) result.append((record_path, None, None)) return result def list_distinfo_files(self, absolute=False): """ Iterates over the ``installed-files.txt`` entries and returns paths for each line if the path is pointing to a file located in the ``.egg-info`` directory or one of its subdirectories. :parameter absolute: If *absolute* is ``True``, each returned path is transformed into a local absolute path. Otherwise the raw value from ``installed-files.txt`` is returned. 
:type absolute: boolean :returns: iterator of paths """ record_path = os.path.join(self.path, 'installed-files.txt') skip = True with codecs.open(record_path, 'r', encoding='utf-8') as f: for line in f: line = line.strip() if line == './': skip = False continue if not skip: p = os.path.normpath(os.path.join(self.path, line)) if p.startswith(self.path): if absolute: yield p else: yield line def __eq__(self, other): return (isinstance(other, EggInfoDistribution) and self.path == other.path) # See http://docs.python.org/reference/datamodel#object.__hash__ __hash__ = object.__hash__ new_dist_class = InstalledDistribution old_dist_class = EggInfoDistribution class DependencyGraph(object): """ Represents a dependency graph between distributions. The dependency relationships are stored in an ``adjacency_list`` that maps distributions to a list of ``(other, label)`` tuples where ``other`` is a distribution and the edge is labeled with ``label`` (i.e. the version specifier, if such was provided). Also, for more efficient traversal, for every distribution ``x``, a list of predecessors is kept in ``reverse_list[x]``. An edge from distribution ``a`` to distribution ``b`` means that ``a`` depends on ``b``. If any missing dependencies are found, they are stored in ``missing``, which is a dictionary that maps distributions to a list of requirements that were not provided by any other distributions. """ def __init__(self): self.adjacency_list = {} self.reverse_list = {} self.missing = {} def add_distribution(self, distribution): """Add the *distribution* to the graph. :type distribution: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` """ self.adjacency_list[distribution] = [] self.reverse_list[distribution] = [] #self.missing[distribution] = [] def add_edge(self, x, y, label=None): """Add an edge from distribution *x* to distribution *y* with the given *label*. 
:type x: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type y: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type label: ``str`` or ``None`` """ self.adjacency_list[x].append((y, label)) # multiple edges are allowed, so be careful if x not in self.reverse_list[y]: self.reverse_list[y].append(x) def add_missing(self, distribution, requirement): """ Add a missing *requirement* for the given *distribution*. :type distribution: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type requirement: ``str`` """ logger.debug('%s missing %r', distribution, requirement) self.missing.setdefault(distribution, []).append(requirement) def _repr_dist(self, dist): return '%s %s' % (dist.name, dist.version) def repr_node(self, dist, level=1): """Prints only a subgraph""" output = [self._repr_dist(dist)] for other, label in self.adjacency_list[dist]: dist = self._repr_dist(other) if label is not None: dist = '%s [%s]' % (dist, label) output.append(' ' * level + str(dist)) suboutput = self.repr_node(other, level + 1) subs = suboutput.split('\n') output.extend(subs[1:]) return '\n'.join(output) def to_dot(self, f, skip_disconnected=True): """Writes a DOT output for the graph to the provided file *f*. If *skip_disconnected* is set to ``True``, then all distributions that are not dependent on any other distribution are skipped. 
:type f: has to support ``file``-like operations :type skip_disconnected: ``bool`` """ disconnected = [] f.write("digraph dependencies {\n") for dist, adjs in self.adjacency_list.items(): if len(adjs) == 0 and not skip_disconnected: disconnected.append(dist) for other, label in adjs: if not label is None: f.write('"%s" -> "%s" [label="%s"]\n' % (dist.name, other.name, label)) else: f.write('"%s" -> "%s"\n' % (dist.name, other.name)) if not skip_disconnected and len(disconnected) > 0: f.write('subgraph disconnected {\n') f.write('label = "Disconnected"\n') f.write('bgcolor = red\n') for dist in disconnected: f.write('"%s"' % dist.name) f.write('\n') f.write('}\n') f.write('}\n') def topological_sort(self): """ Perform a topological sort of the graph. :return: A tuple, the first element of which is a topologically sorted list of distributions, and the second element of which is a list of distributions that cannot be sorted because they have circular dependencies and so form a cycle. """ result = [] # Make a shallow copy of the adjacency list alist = {} for k, v in self.adjacency_list.items(): alist[k] = v[:] while True: # See what we can remove in this run to_remove = [] for k, v in list(alist.items())[:]: if not v: to_remove.append(k) del alist[k] if not to_remove: # What's left in alist (if anything) is a cycle. break # Remove from the adjacency list of others for k, v in alist.items(): alist[k] = [(d, r) for d, r in v if d not in to_remove] logger.debug('Moving to result: %s', ['%s (%s)' % (d.name, d.version) for d in to_remove]) result.extend(to_remove) return result, list(alist.keys()) def __repr__(self): """Representation of the graph""" output = [] for dist, adjs in self.adjacency_list.items(): output.append(self.repr_node(dist)) return '\n'.join(output) def make_graph(dists, scheme='default'): """Makes a dependency graph from the given distributions. 
:parameter dists: a list of distributions :type dists: list of :class:`distutils2.database.InstalledDistribution` and :class:`distutils2.database.EggInfoDistribution` instances :rtype: a :class:`DependencyGraph` instance """ scheme = get_scheme(scheme) graph = DependencyGraph() provided = {} # maps names to lists of (version, dist) tuples # first, build the graph and find out what's provided for dist in dists: graph.add_distribution(dist) for p in dist.provides: name, version = parse_name_and_version(p) logger.debug('Add to provided: %s, %s, %s', name, version, dist) provided.setdefault(name, []).append((version, dist)) # now make the edges for dist in dists: requires = (dist.run_requires | dist.meta_requires | dist.build_requires | dist.dev_requires) for req in requires: try: matcher = scheme.matcher(req) except UnsupportedVersionError: # XXX compat-mode if cannot read the version logger.warning('could not read version %r - using name only', req) name = req.split()[0] matcher = scheme.matcher(name) name = matcher.key # case-insensitive matched = False if name in provided: for version, provider in provided[name]: try: match = matcher.match(version) except UnsupportedVersionError: match = False if match: graph.add_edge(dist, provider, req) matched = True break if not matched: graph.add_missing(dist, req) return graph def get_dependent_dists(dists, dist): """Recursively generate a list of distributions from *dists* that are dependent on *dist*. 
:param dists: a list of distributions :param dist: a distribution, member of *dists* for which we are interested """ if dist not in dists: raise DistlibException('given distribution %r is not a member ' 'of the list' % dist.name) graph = make_graph(dists) dep = [dist] # dependent distributions todo = graph.reverse_list[dist] # list of nodes we should inspect while todo: d = todo.pop() dep.append(d) for succ in graph.reverse_list[d]: if succ not in dep: todo.append(succ) dep.pop(0) # remove dist from dep, was there to prevent infinite loops return dep def get_required_dists(dists, dist): """Recursively generate a list of distributions from *dists* that are required by *dist*. :param dists: a list of distributions :param dist: a distribution, member of *dists* for which we are interested """ if dist not in dists: raise DistlibException('given distribution %r is not a member ' 'of the list' % dist.name) graph = make_graph(dists) req = [] # required distributions todo = graph.adjacency_list[dist] # list of nodes we should inspect while todo: d = todo.pop()[0] req.append(d) for pred in graph.adjacency_list[d]: if pred not in req: todo.append(pred) return req def make_dist(name, version, **kwargs): """ A convenience method for making a dist given just a name and version. 
""" summary = kwargs.pop('summary', 'Placeholder for summary') md = Metadata(**kwargs) md.name = name md.version = version md.summary = summary or 'Placeholder for summary' return Distribution(md) PK.e[~==distlib/__init__.pyonu[ abc@sddlZdZdefdYZyddlmZWn*ek rhdejfdYZnXejeZ e j edS(iNs0.2.4tDistlibExceptioncBseZRS((t__name__t __module__(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pyR s(t NullHandlerRcBs#eZdZdZdZRS(cCsdS(N((tselftrecord((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pythandletcCsdS(N((RR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pytemitRcCs d|_dS(N(tNonetlock(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pyt createLockR(RRRRR (((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pyRs  ( tloggingt __version__t ExceptionRRt ImportErrortHandlert getLoggerRtloggert addHandler(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pyts  PK.e[:Yndistlib/database.pyonu[ abc@s0dZddlmZddlZddlZddlZddlZddlZddlZddl Z ddl Z ddl Z ddl m Z mZddlmZddlmZmZddlmZmZmZdd lmZmZmZmZmZmZmZd d d d dgZ ej!e"Z#dZ$dZ%deddde$dfZ&dZ'de(fdYZ)de(fdYZ*de(fdYZ+de+fdYZ,de,fd YZ-d!e,fd"YZ.e-Z/e.Z0d#e(fd$YZ1d%d&Z2d'Z3d(Z4d)Z5dS(*uPEP 376 implementation.i(tunicode_literalsNi(tDistlibExceptiont resources(tStringIO(t get_schemetUnsupportedVersionError(tMetadatatMETADATA_FILENAMEtWHEEL_METADATA_FILENAME(tparse_requirementtcached_propertytparse_name_and_versiont read_exportst write_exportst CSVReadert CSVWriteru DistributionuBaseInstalledDistributionuInstalledDistributionuEggInfoDistributionuDistributionPathupydist-exports.jsonupydist-commands.jsonu INSTALLERuRECORDu REQUESTEDu RESOURCESuSHAREDu .dist-infot_CachecBs)eZdZdZdZdZRS(uL A simple cache mapping names and .dist-info paths to distributions cCsi|_i|_t|_dS(uZ Initialise an instance. There is normally one for each DistributionPath. 
N(tnametpathtFalset generated(tself((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt__init__0s  cCs'|jj|jjt|_dS(uC Clear the cache, setting it to its initial state. N(RtclearRRR(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR8s  cCsH|j|jkrD||j|j<|jj|jgj|ndS(u` Add a distribution to the cache. :param dist: The distribution to add. N(RRt setdefaulttkeytappend(Rtdist((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytadd@s(t__name__t __module__t__doc__RRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR,s  tDistributionPathcBseZdZd edZdZdZeeeZ dZ dZ dZ e dZdZd Zd d Zd Zd d ZRS(uU Represents a set of distributions installed on a path (typically sys.path). cCsg|dkrtj}n||_t|_||_t|_t|_t|_ t d|_ dS(u Create an instance from a path, optionally including legacy (distutils/ setuptools/distribute) distributions. :param path: The path to use, as a list of directories. If not specified, sys.path is used. :param include_egg: If True, this instance will look for and return legacy distributions as well as those based on PEP 376. udefaultN( tNonetsysRtTruet _include_distt _include_eggRt_cachet _cache_eggt_cache_enabledRt_scheme(RRt include_egg((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRNs        cCs|jS(N(R((R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_get_cache_enabledbscCs ||_dS(N(R((Rtvalue((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_set_cache_enabledescCs|jj|jjdS(u, Clears the internal cache. N(R&RR'(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt clear_cachejs c cst}x|jD]}tj|}|dkr:qn|jd}| s|j r`qnt|j}x^|D]V}|j|}| sv|j|krqvn|jr}|j t r}t t g}x<|D]1}t j||} |j| } | rPqqWqvtj| j} td| dd} WdQXtjd|j|j|jt|jd| d|Vqv|jrv|j d rvtjd|j|j|jt|j|VqvqvWqWdS( uD Yield .dist-info and/or .egg(-info) distributions. 
utfileobjtschemeulegacyNuFound %stmetadatatenvu .egg-infou.egg(u .egg-infou.egg(tsetRRtfinder_for_pathR!tfindt is_containertsortedR$tendswitht DISTINFO_EXTRRt posixpathtjoint contextlibtclosingt as_streamRtloggertdebugRtnew_dist_classR%told_dist_class( RtseenRtfindertrtrsettentrytpossible_filenamestmetadata_filenamet metadata_pathtpydisttstreamR1((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_yield_distributionsrs@       cCs|jj }|jo |jj }|s/|rxF|jD]8}t|trd|jj|q<|jj|q<W|rt|j_n|rt|j_qndS(uk Scan the path for distributions and populate the cache with those that are found. N( R&RR%R'RMt isinstancetInstalledDistributionRR#(Rtgen_disttgen_eggR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_generate_caches  cCs)|jdd}dj||gtS(uo The *name* and *version* parameters are converted into their filename-escaped form, i.e. any ``'-'`` characters are replaced with ``'_'`` other than the one in ``'dist-info'`` and the one separating the name from the version number. :parameter name: is converted to a standard distribution name by replacing any runs of non- alphanumeric characters with a single ``'-'``. :type name: string :parameter version: is converted to a standard version string. Spaces become dots, and all other non-alphanumeric characters (except dots) become dashes, with runs of multiple dashes condensed to a single dash. :type version: string :returns: directory name :rtype: stringu-u_(treplaceR;R9(tclsRtversion((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytdistinfo_dirnamesccs|js(xv|jD] }|VqWnZ|jx|jjjD] }|VqEW|jrx"|jjjD] }|VqpWndS(u5 Provides an iterator that looks for distributions and returns :class:`InstalledDistribution` or :class:`EggInfoDistribution` instances for each one of them. 
:rtype: iterator of :class:`InstalledDistribution` and :class:`EggInfoDistribution` instances N(R(RMRRR&RtvaluesR%R'(RR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytget_distributionss     cCsd}|j}|jsNx|jD]}|j|kr(|}Pq(q(Wne|j||jjkr|jj|d}n2|jr||j jkr|j j|d}n|S(u= Looks for a named distribution on the path. This function only returns the first result found, as no more than one value is expected. If nothing is found, ``None`` is returned. :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` or ``None`` iN( R!tlowerR(RMRRRR&RR%R'(RRtresultR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytget_distributions     c csd}|dk r_y |jjd||f}Wq_tk r[td||fq_Xnx|jD]z}|j}xh|D]`}t|\}}|dkr||kr|VPqq||kr|j|r|VPqqWqlWdS(u Iterates over all distributions to find which distributions provide *name*. If a *version* is provided, it will be used to filter the results. This function only returns the first result found, since no more than one values are expected. If the directory is not found, returns ``None``. :parameter version: a version specifier that indicates the version required, conforming to the format in ``PEP-345`` :type name: string :type version: string u%s (%s)uinvalid name or version: %r, %rN( R!R)tmatchert ValueErrorRRXtprovidesR tmatch( RRRUR\Rtprovidedtptp_nametp_ver((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytprovides_distributions$       cCs;|j|}|dkr.td|n|j|S(u5 Return the path to a resource file. uno distribution named %r foundN(R[R!t LookupErrortget_resource_path(RRt relative_pathR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt get_file_paths ccsxy|jD]k}|j}||kr ||}|dk rY||kru||Vquqxx|jD] }|VqfWq q WdS(u Return all of the exported entries in a particular category. :param category: The category to search for entries. :param name: If specified, only entries with that name are returned. 
N(RXtexportsR!RW(RtcategoryRRREtdtv((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytget_exported_entries"s     N(RRRR!RRR+R-tpropertyt cache_enabledR.RMRRt classmethodRVRXR[RdRhRm(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR Js    *    $ t DistributioncBseZdZeZeZdZedZeZ edZ edZ dZ edZ edZedZed Zed Zd Zd Zd ZdZRS(u A base class for distributions, whether installed or from indexes. Either way, it must have some metadata, so that's all that's needed for construction. cCsp||_|j|_|jj|_|j|_d|_d|_d|_d|_ t |_ i|_ dS(u Initialise an instance. :param metadata: The instance of :class:`Metadata` describing this distribution. N( R1RRYRRUR!tlocatortdigesttextrastcontextR3t download_urlstdigests(RR1((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRGs        cCs |jjS(uH The source archive download URL for this distribution. (R1t source_url(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRxXscCsd|j|jfS(uX A utility property which displays the name and version in parentheses. u%s (%s)(RRU(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytname_and_versionascCsB|jj}d|j|jf}||kr>|j|n|S(u A set of distribution names and versions provided by this distribution. :return: A set of "name (version)" strings. 
u%s (%s)(R1R^RRUR(Rtplistts((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR^hs   cCsS|j}tjd|jt||}t|j|d|jd|jS(Nu%Getting requirements from metadata %rRtR2( R1R?R@ttodicttgetattrR3tget_requirementsRtRu(Rtreq_attrtmdtreqts((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_get_requirementsts  cCs |jdS(Nu run_requires(R(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt run_requires{scCs |jdS(Nu meta_requires(R(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt meta_requiresscCs |jdS(Nubuild_requires(R(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytbuild_requiresscCs |jdS(Nu test_requires(R(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt test_requiresscCs |jdS(Nu dev_requires(R(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt dev_requiressc Cst|}t|jj}y|j|j}Wn@tk rvtjd||j d}|j|}nX|j }t }x]|j D]R}t |\}} ||krqny|j| }PWqtk rqXqW|S(u Say if this instance matches (fulfills) a requirement. :param req: The requirement to match. :rtype req: str :return: True if it matches, else False. u+could not read version %r - using name onlyi(R RR1R0R\t requirementRR?twarningtsplitRRR^R R_( RtreqRER0R\RRZRaRbRc((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytmatches_requirements*      cCs6|jrd|j}nd}d|j|j|fS(uC Return a textual representation of this instance, u [%s]uu(RxRRU(Rtsuffix((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt__repr__s cCs[t|t|k r!t}n6|j|jkoT|j|jkoT|j|jk}|S(u< See if this distribution is the same as another. :param other: The distribution to compare with. To be equal to one another. distributions must have the same type, name, version and source_url. :return: True if it is the same, else False. (ttypeRRRURx(RtotherRZ((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt__eq__s  cCs't|jt|jt|jS(uH Compute hash in a way which matches the equality test. 
(thashRRURx(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt__hash__s(RRRRtbuild_time_dependencyt requestedRRnRxt download_urlRyR^RRRRRRRRRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRq5s$   " tBaseInstalledDistributioncBs,eZdZdZddZddZRS(u] This is the base class for installed distributions (whether PEP 376 or legacy). cCs,tt|j|||_||_dS(u Initialise an instance. :param metadata: An instance of :class:`Metadata` which describes the distribution. This will normally have been initialised from a metadata file in the ``path``. :param path: The path of the ``.dist-info`` or ``.egg-info`` directory for the distribution. :param env: This is normally the :class:`DistributionPath` instance where this distribution was found. N(tsuperRRRt dist_path(RR1RR2((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRs  cCs|dkr|j}n|dkr6tj}d}ntt|}d|j}||j}tj|jdj d}d||fS(u Get the hash of some data, using a particular hash algorithm, if specified. :param data: The data to be hashed. :type data: bytes :param hasher: The name of a hash implementation, supported by hashlib, or ``None``. Examples of valid values are ``'sha1'``, ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and ``'sha512'``. If no hasher is specified, the ``hasher`` attribute of the :class:`InstalledDistribution` instance is used. If the hasher is determined to be ``None``, MD5 is used as the hashing algorithm. :returns: The hash of the data. If a hasher was explicitly specified, the returned hash will be prefixed with the specified hasher followed by '='. 
:rtype: str uu%s=t=uasciiu%s%sN( R!thasherthashlibtmd5R}Rstbase64turlsafe_b64encodetrstriptdecode(RtdataRtprefixRs((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytget_hashs      !N(RRRR!RRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRs ROcBseZdZdZdddZdZdZdZe dZ dZ dZ d Z d Zed Zd Ze d ZedZdZdZdZdZejZRS(u  Created with the *path* of the ``.dist-info`` directory provided to the constructor. It reads the metadata contained in ``pydist.json`` when it is instantiated., or uses a passed in Metadata instance (useful for when dry-run mode is being used). usha256c Cstj||_}|dkr;ddl}|jn|rr|jrr||jjkrr|jj|j }n|dkr$|j t }|dkr|j t }n|dkr|j d}n|dkrt dt |fntj|j}td|dd}WdQXntt|j||||rb|jrb|jj|ny|j d}Wn'tk rddl}|jnX|dk |_dS(NiuMETADATAuno %s found in %sR/R0ulegacyu REQUESTED(RR4RDR!tpdbt set_traceR(R&RR1R5RRR]R<R=R>RRRORRtAttributeErrorR(RRR1R2RDRRERL((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRs4  !       cCsd|j|j|jfS(Nu#(RRUR(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR2scCsd|j|jfS(Nu%s %s(RRU(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt__str__6sc Csg}|jd}tj|j}td|i}x_|D]W}gtt|dD] }d^qb}||\}} } |j|| | fqFWWdQXWdQX|S(u" Get the list of installed files for the distribution :return: A list of tuples of path, hash and size. Note that hash and size might be ``None`` for some entries. The path is exactly as stored in the file (which is as in PEP 376). uRECORDRLiN( tget_distinfo_resourceR<R=R>RtrangetlenR!R( RtresultsRERLt record_readertrowtitmissingRtchecksumtsize((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt _get_records9s (&cCs.i}|jt}|r*|j}n|S(u Return the information exported by this distribution. :return: A dictionary of exports, mapping an export category to a dict of :class:`ExportEntry` instances describing the individual export entries, and keyed by name. 
(RtEXPORTS_FILENAMER (RRZRE((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRiPs cCsLi}|jt}|rHtj|j}t|}WdQXn|S(u Read exports data from a file in .ini format. :return: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. N(RRR<R=R>R (RRZRERL((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR ^s cCs8|jt}t|d}t||WdQXdS(u Write a dictionary of exports to a file in .ini format. :param exports: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. uwN(tget_distinfo_fileRtopenR (RRitrftf((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR msc Cs|jd}tj|jF}td|.}x$|D]\}}||kr@|Sq@WWdQXWdQXtd|dS(uW NOTE: This API may change in the future. Return the absolute path to a resource file with the given relative path. :param relative_path: The path, relative to .dist-info, of the resource of interest. :return: The absolute path where the resource is to be found. u RESOURCESRLNu3no resource file with relative path %r is installed(RR<R=R>RtKeyError(RRgRERLtresources_readertrelativet destination((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRfxs  ccs x|jD] }|Vq WdS(u Iterates over the ``RECORD`` entries and returns a tuple ``(path, hash, size)`` for each line. :returns: iterator of (path, hash, size) N(R(RRZ((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytlist_installed_filessc Cstjj|d}tjj|j}|j|}tjj|d}|jd}tjd||rwdSt |}x|D]}tjj |s|j d rd} } nCdtjj |} t |d} |j| j} WdQX|j|s(|r@|j|r@tjj||}n|j|| | fqW|j|rtjj||}n|j|ddfWdQX|S( u Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any existing ``RECORD`` file is silently overwritten. prefix is used to determine when to write absolute paths. 
uuRECORDu creating %su.pycu.pyou%durbN(u.pycu.pyo(tosRR;tdirnamet startswithRR?tinfoR!RtisdirR8tgetsizeRRtreadtrelpathtwriterow( RtpathsRtdry_runtbasetbase_under_prefixt record_pathtwriterRt hash_valueRtfp((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytwrite_installed_filess. ! c Csg}tjj|j}|jd}xn|jD]`\}}}tjj|sptjj||}n||krq7ntjj|s|j|dt t fq7tjj |r7t tjj |}|r||kr|j|d||fq|rd|kr3|jddd}nd }t|dG} |j| j|} | |kr|j|d|| fnWd QXqq7q7W|S( u Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value. uRECORDuexistsusizeu=iiurbuhashN(RRRRRtisabsR;texistsRR#RtisfiletstrRRR!RRR( Rt mismatchesRRRRRt actual_sizeRRt actual_hash((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytcheck_installed_filess.    ,cCsi}tjj|jd}tjj|rtj|ddd}|jj}WdQXx[|D]P}|jdd\}}|dkr|j |gj |qj|||su%s (%s)( RtstripRR?RR Rtt constraintsRRR;(RtreqsRRREtcons((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytparse_requires_dataos&       csRg}y4tj|dd}|j}WdQXWntk rMnX|S(uCreate a list of dependencies from a requires.txt file. *req_path*: the path to a setuptools-produced requires.txt file. 
uruutf-8N(RRRtIOError(treq_pathRR(R(s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytparse_requires_paths u.egguEGG-INFOuPKG-INFORR0ulegacyu requires.txtuEGG-INFO/PKG-INFOuutf8R/uEGG-INFO/requires.txtuutf-8u .egg-infou,path must end with .egg-info or .egg, got %r(R!R8RRRR;Rt zipimportt zipimporterRtget_dataRRRtadd_requirements( RRtrequiresRt meta_pathR1RtzipfR/R((Rs@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRls:     cCsd|j|j|jfS(Nu!(RRUR(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRscCsd|j|jfS(Nu%s %s(RRU(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRscCsg}tjj|jd}tjj|rx`|jD]O\}}}||kr^q=ntjj|s=|j|dttfq=q=Wn|S(u Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value. uinstalled-files.txtuexists(RRR;RRRR#R(RRRRt_((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRs  #c Cs2d}d}tjj|jd}g}tjj|r.tj|ddd}x|D]}|j}tjjtjj|j|}tjj|stj d||j d rqdqntjj |sd|j |||||fqdqdWWd QX|j |d d fn|S( u Iterates over the ``installed-files.txt`` entries and returns a tuple ``(path, hash, size)`` for each line. 
:returns: a list of (path, hash, size) cSs@t|d}z|j}Wd|jXtj|jS(Nurb(RRtcloseRRt hexdigest(RRtcontent((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_md5s  cSstj|jS(N(Rtstattst_size(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyt_sizesuinstalled-files.txturRuutf-8uNon-existent file: %su.pycu.pyoN(u.pycu.pyo(RRR;RRRRtnormpathR?RR8RRR!(RRRRRZRRRa((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRs"    $ /c cstjj|jd}t}tj|ddd}x|D]}|j}|dkrjt}q@n|s@tjjtjj|j|}|j |jr|r|Vq|Vqq@q@WWdQXdS(u  Iterates over the ``installed-files.txt`` entries and returns paths for each line if the path is pointing to a file located in the ``.egg-info`` directory or one of its subdirectories. :parameter absolute: If *absolute* is ``True``, each returned path is transformed into a local absolute path. Otherwise the raw value from ``installed-files.txt`` is returned. :type absolute: boolean :returns: iterator of paths uinstalled-files.txturRuutf-8u./N( RRR;R#RRRRRR(RtabsoluteRtskipRRRa((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRs    $cCst|to|j|jkS(N(RNRR(RR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRsN(RRRR#RRR!RRRRRRRRRRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyRNs  K    &  tDependencyGraphcBsheZdZdZdZd dZdZdZddZ e dZ d Z d Z RS( u Represents a dependency graph between distributions. The dependency relationships are stored in an ``adjacency_list`` that maps distributions to a list of ``(other, label)`` tuples where ``other`` is a distribution and the edge is labeled with ``label`` (i.e. the version specifier, if such was provided). Also, for more efficient traversal, for every distribution ``x``, a list of predecessors is kept in ``reverse_list[x]``. An edge from distribution ``a`` to distribution ``b`` means that ``a`` depends on ``b``. 
If any missing dependencies are found, they are stored in ``missing``, which is a dictionary that maps distributions to a list of requirements that were not provided by any other distributions. cCsi|_i|_i|_dS(N(tadjacency_listt reverse_listR(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyR.s  cCsg|j| "%s" [label="%s"] u "%s" -> "%s" usubgraph disconnected { ulabel = "Disconnected" ubgcolor = red u"%s"u u} N(RRtitemsRRR!R(RRtskip_disconnectedt disconnectedRtadjsRR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/database.pytto_dotgs&    %    cCs=g}i}x(|jjD]\}}|||t|jD])\}}|sZ|j|||=qZqZW|sPnxO|jD]A\}}g|D]$\}}||kr||f^q||sL         4  7F 6  PK.e[bޑ66distlib/resources.pycnu[ abc@s ddlmZddlZddlZddlZddlZddlZddlZddlZddl Z ddl Z ddl m Z ddl mZmZmZmZejeZdadefdYZdefd YZd efd YZd efd YZdefdYZdefdYZieed6ee j6Z yQyddl!Z"Wne#k rddl$Z"nXee e"j%R R((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pytfinds  cCst|jdS(Nurb(RR (RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR'scCs)t|jd}|jSWdQXdS(Nurb(RR tread(RRR ((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR+scCstjj|jS(N(RR tgetsize(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR,scsDfd}tgtj|jD]}||r%|^q%S(Ncs|dko|jj S(Nu __pycache__(tendswithtskipped_extensions(R (R(sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pytalloweds (tsetRtlistdirR (RRRIR ((RsA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR2scCs|j|jS(N(RCR (RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR0sccs|j|}|dk r|g}x|r|jd}|V|jr'|j}xe|jD]W}|sr|}ndj||g}|j|}|jr|j|q]|Vq]Wq'q'WndS(Niu/(RDRtpopR0R%R3R tappend(RR>RttodotrnameR%tnew_nametchild((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pytiterators        (u.pycu.pyou.class(u.pycu.pyo(R"R#R.tsystplatformt startswithRHR R9RARBRRDR'R+R,R2R0t staticmethodRR 
RRCRR(((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR4ws"          tZipResourceFindercBs_eZdZdZdZdZdZdZdZdZ dZ d Z RS( u6 Resource finder for resources in .zip files. cCstt|j||jj}dt||_t|jdrY|jj|_nt j ||_t |j|_ dS(Niu_files( R RWR R7tarchivetlent prefix_lenthasattrt_filest zipimportt_zip_directory_cachetsortedtindex(RR5RX((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR s cCs|S(N((RR ((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR9scCs||j}||jkr%t}nr|rN|dtjkrN|tj}ntj|j|}y|j|j|}Wntk rt }nX|st j d||j j nt j d||j j |S(Niu_find failed: %r %ru_find worked: %r %r(RZR\RRR?tbisectR`RUt IndexErrorR/tloggertdebugR7R(RR Rti((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyRBs    cCs-|jj}|jdt|}||fS(Ni(R7RXR RY(RRRR ((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyRs cCs|jj|jS(N(R7tget_dataR (RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR+scCstj|j|S(N(tiotBytesIOR+(RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR'scCs|j|j}|j|dS(Ni(R RZR\(RRR ((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR,scCs|j|j}|r9|dtjkr9|tj7}nt|}t}tj|j|}xn|t|jkr|j|j|sPn|j||}|j |j tjdd|d7}qfW|S(Niii( R RZRR?RYRJRaR`RUtaddR<(RRR tplenRRets((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyR2s   cCs||j}|r6|dtjkr6|tj7}ntj|j|}y|j|j|}Wntk r~t}nX|S(Ni(RZRR?RaR`RURbR/(RR ReR((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyRCs   ( R"R#R.R R9RBRR+R'R,R2RC(((sA/usr/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyRWs       cCs|tt|sJ         ",!ZM       PK.e[iffdistlib/locators.pyonu[ abc@s&ddlZddlmZddlZddlZddlZddlZddlZyddlZWne k rddl ZnXddl Z ddl m Z ddlmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZddlm Z m!Z!m"Z"ddl#m$Z$ddl%m&Z&m'Z'm(Z(m)Z)m*Z*m+Z+m,Z,m-Z-m.Z.dd l/m0Z0m1Z1dd l2m3Z3m4Z4ej5e6Z7ej8d Z9ej8d ej:Z;ej8d Z<dZ=e>dZ?defdYZ@deAfdYZBdeBfdYZCdeBfdYZDdeAfdYZEdeBfdYZFdeBfdYZGdeBfdYZHd 
eBfd!YZId"eBfd#YZJeJeHeFd$d%d&d'd(ZKeKjLZLej8d)ZMd*eAfd+YZNdS(,iN(tBytesIOi(tDistlibException(turljointurlparset urlunparset url2pathnamet pathname2urltqueuetquotetunescapet string_typest build_openertHTTPRedirectHandlert text_typetRequestt HTTPErrortURLError(t DistributiontDistributionPatht make_dist(tMetadata( tcached_propertytparse_credentialst ensure_slashtsplit_filenametget_project_datatparse_requirementtparse_name_and_versiont ServerProxytnormalize_name(t get_schemetUnsupportedVersionError(tWheelt is_compatibles^(\w+)=([a-f0-9]+)s;\s*charset\s*=\s*(.*)\s*$stext/html|application/x(ht)?mlshttps://pypi.python.org/pypicCs1|dkrt}nt|dd}|jS(s Return all distribution names known by an index. :param url: The URL of the index. :return: A list of all known distribution names. ttimeoutg@N(tNonet DEFAULT_INDEXRt list_packages(turltclient((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytget_all_distribution_names)s  tRedirectHandlercBs%eZdZdZeZZZRS(sE A class to work around a bug in some Python 3.2.x releases. c Csd}x(dD] }||kr ||}Pq q W|dkrAdSt|}|jdkrt|j|}t|dr|j||q||| Clear any errors which may have been logged. N(RR(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt clear_errorsscCs|jjdS(N(RDtclear(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt clear_cachescCs|jS(N(t_scheme(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt _get_schemescCs ||_dS(N(RV(R3tvalue((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt _set_schemescCstddS(s= For a given project, get a dictionary mapping available versions to Distribution instances. This should be implemented in subclasses. If called from a locate() request, self.matcher will be set to a matcher for the requirement to satisfy, otherwise it will be None. 
s Please implement in the subclassN(tNotImplementedError(R3tname((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt _get_projects cCstddS(sJ Return all the distribution names known to this locator. s Please implement in the subclassN(RZ(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytget_distribution_namesscCsj|jdkr!|j|}nE||jkr@|j|}n&|j|j|}||j|<|S(s For a given project, get a dictionary mapping available versions to Distribution instances. This calls _get_project to do all the work, and just implements a caching layer on top. N(RDR#R\RS(R3R[RP((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt get_projects  cCsyt|}tj|j}t}|jd}|rTtt||j}n|j dkd|j k|||fS(su Give an url a score which can be used to choose preferred URLs for a given project release. s.whlthttpsspypi.python.org( Rt posixpathtbasenametpathtTruetendswithR!R t wheel_tagsR.tnetloc(R3R&ttRat compatibletis_wheel((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt score_urls cCs{|}|rw|j|}|j|}||kr?|}n||kratjd||qwtjd||n|S(s{ Choose one of two URLs where both are candidates for distribution archives for the same version of a distribution (for example, .tar.gz vs. zip). The current implementation favours https:// URLs over http://, archives from PyPI over those from other locations, wheel compatibility (if a wheel) and then the archive name. sNot replacing %r with %rsReplacing %r with %r(Rjtloggertdebug(R3turl1turl2RPts1ts2((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt prefer_urls    cCs t||S(sZ Attempt to split a filename in project name, version and Python version. 
(R(R3tfilenamet project_name((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRsc Csd}d}t|\}}}}} } | jjdrXtjd|| ntj| } | r| j\} } n d\} } |}|r|ddkr|d }n|j dryt |}t ||j r|dkrt }n||j|}|ri|jd6|jd6|jd 6t||||| d fd 6d jg|jD]}d jt|d^qdd6}qnWqtk r}tjd|qXn|j |jrtj|}}x|jD]}|j |r|t| }|j||}|s@tjd|nu|\}}}| se|||ri|d6|d6|d 6t||||| d fd 6}|r||d= 1.0, < 2.0, != 1.3)' :param prereleases: If ``True``, allow pre-release versions to be located. Otherwise, pre-release versions are not returned. :return: A :class:`Distribution` instance, or ``None`` if no such distribution could be located. sNot a valid requirement: %rsmatcher: %s (%s)iRRs%s did not match %rs%skipping pre-release version %s of %sserror matching %s with %riR:ssorted list: %siN(RR(R#RRRR.RFt requirementRkRlttypeR<R^R[Rt version_classR}t is_prereleaseRMRRtsortedR:textrasRKRt download_urlsR(R3Rt prereleasesRPtrR.RFtversionstslisttvclstkRxtdtsdR&((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytlocatePsT          $   (s.tar.gzs.tar.bz2s.tars.zips.tgzs.tbz(s.eggs.exes.whl(s.pdfN(s.whl(R<R=R>tsource_extensionstbinary_extensionstexcluded_extensionsR#ReRRIRRRSRURWRYtpropertyR.R\R]R^RjRqRRRRRLR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRBSs.             F  tPyPIRPCLocatorcBs)eZdZdZdZdZRS(s This locator uses XML-RPC to locate distributions. It therefore cannot be used with simple mirrors (that only mirror file content). cKs8tt|j|||_t|dd|_dS(s Initialise an instance. :param url: The URL to use for XML-RPC. :param kwargs: Passed to the superclass constructor. R"g@N(tsuperRRItbase_urlRR'(R3R&tkwargs((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIs cCst|jjS(sJ Return all the distribution names known to this locator. 
(RR'R%(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR]sc Csviid6id6}|jj|t}xF|D]>}|jj||}|jj||}td|j}|d|_|d|_|j d|_ |j dg|_ |j d|_ t |}|r0|d } | d |_|j| |_||_|||RIR]R\(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRs tPyPIJSONLocatorcBs)eZdZdZdZdZRS(sw This locator uses PyPI's JSON interface. It's very limited in functionality and probably not worth using. cKs)tt|j|t||_dS(N(RRRIRR(R3R&R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIscCstddS(sJ Return all the distribution names known to this locator. sNot available from this locatorN(RZ(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR]scCsiid6id6}t|jdt|}yE|jj|}|jj}tj|}t d|j }|d}|d|_ |d|_ |j d|_|j d g|_|j d |_t|}||_|d} |||j RIR]R\(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRs  tPagecBszeZdZejdejejBejBZejdejejBZ dZ ejdejZ e dZ RS(s4 This class represents a scraped HTML page. s (rel\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s ]*))\s+)? href\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s ]*)) (\s+rel\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s ]*)))? s!]+)cCsM||_||_|_|jj|j}|rI|jd|_ndS(sk Initialise an instance with the Unicode page contents and the URL they came from. iN(RRR&t_basetsearchtgroup(R3RR&R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIs  s[^a-z0-9$&+,/:;=?@.#%_\\|-]cCsd}t}x|jj|jD]}|jd}|dpv|dpv|dpv|dpv|dpv|d}|d p|d p|d }t|j|}t|}|jj d |}|j ||fq(Wt |d ddt }|S(s Return the URLs of all the links on a page together with information about their "rel" attribute, for determining which ones to treat as downloads and which ones to queue for further scraping. 
cSs@t|\}}}}}}t||t||||fS(sTidy up an URL.(RRR(R&R.RfRbRRR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytclean%sR,trel1trel2trel3trel4trel5trel6RmRnturl3cSsdt|jdS(Ns%%%2xi(tordR(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt3R,R:cSs|dS(Ni((Rg((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR7R,treverse( Rt_hreftfinditerRt groupdictRRR t _clean_retsubRRRc(R3RRPR}RtrelR&((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytlinkss   (R<R=R>tretcompiletItStXRRRIRRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRs tSimpleScrapingLocatorcBseZdZiejd6dd6dd6ZdddZdZd Z d Z e j d e j Zd Zd ZdZdZdZe j dZdZRS(s A locator which scrapes HTML pages to locate downloads for a distribution. This runs multiple threads to do the I/O; performance is at least as good as pip's PackageFinder, which works in an analogous fashion. tdeflatecCstjdttjS(Ntfileobj(tgziptGzipFileRRR(tb((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRER,RcCs|S(N((R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRFR,tnonei cKstt|j|t||_||_i|_t|_t j |_ t|_ t |_||_tj|_tj|_dS(s Initialise an instance. :param url: The root URL to use for scraping. :param timeout: The timeout, in seconds, to be applied to requests. This defaults to ``None`` (no timeout specified). :param num_workers: The number of worker threads you want to do I/O, This defaults to 10. :param kwargs: Passed to the superclass. N(RRRIRRR"t _page_cacheRt_seenRRGt _to_fetcht _bad_hostsRLtskip_externalst num_workerst threadingtRLockt_lockt_gplock(R3R&R"RR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIIs       cCscg|_xSt|jD]B}tjd|j}|jt|j|jj |qWdS(s Threads are created only when get_project is called, and terminate before it returns. They are there primarily to parallelise I/O (i.e. fetching web pages). 
ttargetN( t_threadstrangeRRtThreadt_fetcht setDaemonRctstartRM(R3tiRg((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt_prepare_threadscs    cCsOx!|jD]}|jjdq Wx|jD]}|jq.Wg|_dS(su Tell all the threads to terminate (by sending a sentinel value) and wait for them to do so. N(RRRR#R(R3Rg((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt _wait_threadsps c Csiid6id6}|j||_||_t|jdt|}|jj|jj|j z1t j d||j j ||j jWd|jX|`WdQX|S(NRRs%s/s Queueing %s(RRPRsRRRRRTRRRkRlRRRR(R3R[RPR&((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR\}s        s<\b(linux-(i\d86|x86_64|arm\w+)|win(32|-amd64)|macosx-?\d+)\bcCs|jj|S(sD Does an URL refer to a platform-specific download? (tplatform_dependentR(R3R&((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt_is_platform_dependentscCsp|j|rd}n|j||j}tjd|||rl|j|j|j|WdQXn|S(s% See if an URL is a suitable download for a project. If it is, register information in the result dictionary (for _get_project) about the specific version it's for. Note that the return value isn't actually used other than as a boolean value. sprocess_download: %s -> %sN( RR#RRsRkRlRRRP(R3R&R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt_process_downloads   c Cst|\}}}}}}|j|j|j|jrGt}n|jrl|j|j rlt}n|j|jst}ny|d krt}nd|d krt}nO|j |rt}n7|j ddd} | j d krt}nt }t jd |||||S( s Determine whether a link URL from a referring page and with a particular "rel" attribute should be queued for scraping. thomepagetdownloadthttpR_tftpt:iit localhosts#should_queue: %s (%s) from %s -> %s(RR (R R_R (RRdRRRRLRR{RRtsplitRzRcRkRl( R3tlinktreferrerRR.RfRbt_RPthost((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt _should_queues*           cCs xtr|jj}zy|r|j|}|dkrEwnx|jD]y\}}||jkrO|jj||j| r|j |||rt j d|||jj |qqOqOWnWn)t k r}|jj t|nXWd|jjX|sPqqWdS(s Get a URL to fetch from the work queue, get the HTML page, examine its links for download candidates and candidates for further scraping. 
This is a handy method to run in a thread. sQueueing %s from %sN(RcRRKtget_pageR#RRRRRRkRlRRRHR RO(R3R&tpageRRRQ((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRs(  !cCst|\}}}}}}|dkrZtjjt|rZtt|d}n||jkr|j|}tj d||nK|j ddd}d}||j krtj d||n t |did d 6}zy7tj d ||jj|d |j} tj d || j} | jdd} tj| r| j} | j} | jd}|r|j|}|| } nd}tj| }|r|jd}ny| j|} Wn tk r| jd} nXt| | }||j| ]*>([^<]+)tzlibt decompressRR#RIRRR\RRRRRRRRRR#R](((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR;s$           ;tDirectoryLocatorcBs2eZdZdZdZdZdZRS(s? This class locates distributions in a directory tree. cKso|jdt|_tt|j|tjj|}tjj |sbt d|n||_ dS(s Initialise an instance. :param path: The root of the directory tree to search. :param kwargs: Passed to the superclass constructor, except for: * recursive - if True (the default), subdirectories are recursed into. If False, only the top-level directory is searched, t recursivesNot a directory: %rN( RRcR'RR&RIRRbtabspathRRtbase_dir(R3RbR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRI5s cCs|j|jS(s Should a filename be considered as a candidate for a distribution archive? As well as the filename, the directory which contains it is provided, though not used by the current implementation. (RdR(R3Rrtparent((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytshould_includeFsc Csiid6id6}xtj|jD]\}}}x|D]}|j||r=tjj||}tddttjj|dddf}|j ||}|r|j ||qq=q=W|j s'Pq'q'W|S(NRRRR,( RtwalkR)R+RbRRRR(RRR'( R3R[RPtroottdirstfilestfnR&R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR\Ns"   c Cst}xtj|jD]\}}}x|D]}|j||r2tjj||}tddttjj |dddf}|j |d}|r|j |dqq2q2W|j sPqqW|S(sJ Return all the distribution names known to this locator. 
RR,R[N(RRR,R)R+RbRRRR(RR#RR'(R3RPR-R.R/R0R&R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR]^s "   (R<R=R>RIR+R\R](((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR&0s    t JSONLocatorcBs eZdZdZdZRS(s This locator uses special extended metadata (not available on PyPI) and is the basis of performant dependency resolution in distlib. Other locators require archive downloads before dependencies can be determined! As you might imagine, that can be slow. cCstddS(sJ Return all the distribution names known to this locator. sNot available from this locatorN(RZ(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR]xscCsBiid6id6}t|}|r>x|jdgD]}|ddks9|ddkreq9nt|d|d d |jd d d |j}|j}|d |_d|kr|drd|df|_n|jdi|_|jdi|_|||j <|dj |j t j |d q9Wn|S(NRRR/tptypetsdistt pyversiontsourceR[RxRsPlaceholder for summaryR.R&RRt requirementstexports( RRKRR.RRRt dependenciesR7RxRRR(R3R[RPRRRR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR\~s&        .(R<R=R>R]R\(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR1qs tDistPathLocatorcBs eZdZdZdZRS(s This locator finds installed distributions in a path. It can be useful for adding to an :class:`AggregatingLocator`. cKs#tt|j|||_dS(ss Initialise an instance. :param distpath: A :class:`DistributionPath` instance to search. N(RR9RItdistpath(R3R:R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIscCs|jj|}|dkr5iid6id6}nGi||j6it|jg|j6d6itdg|j6d6}|S(NRR(R:tget_distributionR#RxRR(R3R[RRP((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR\s  (R<R=R>RIR\(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR9s tAggregatingLocatorcBsPeZdZdZdZdZeejj eZdZ dZ RS(sI This class allows you to chain and/or merge a list of locators. cOs8|jdt|_||_tt|j|dS(s Initialise an instance. :param locators: The list of locators to search. 
:param kwargs: Passed to the superclass constructor, except for: * merge - if False (the default), the first successful search from any of the locators is returned. If True, the results from all locators are merged (this can be slow). tmergeN(RRLR=tlocatorsRR<RI(R3R>R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIs  cCs5tt|jx|jD]}|jqWdS(N(RR<RUR>(R3R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRUscCs*||_x|jD]}||_qWdS(N(RVR>R.(R3RXR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRYs c Cs]i}xP|jD]E}|j|}|r|jr|jdi}|jdi}|j||jd}|r|rxF|jD]5\}} ||kr||c| OR^R=RKtupdateRRFR#RcRLR}( R3R[RPRRR/RtdfRRtddtfound((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR\s8         cCsIt}x9|jD].}y||jO}Wqtk r@qXqW|S(sJ Return all the distribution names known to this locator. (RR>R]RZ(R3RPR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR]s  ( R<R=R>RIRURYRRBR.tfgetR\R](((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR<s    ,shttps://pypi.python.org/simple/R"g@R.tlegacys1(?P[\w-]+)\s*\(\s*(==\s*)?(?P[^)]+)\)$tDependencyFindercBsVeZdZddZdZdZdZdZdZ de dZ RS( s0 Locate dependencies for distributions. cCs(|p t|_t|jj|_dS(sf Initialise an instance, using the specified locator to locate distributions. N(tdefault_locatorRRR.(R3R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIscCstjd||j}||j|<||j||jf= 1.0) while C requires (B >= 1.1). For successful replacement, ``provider`` must meet all the requirements which ``other`` fulfills. :param provider: The provider we are trying to replace with. :param other: The provider we're trying to replace. :param problems: If False is returned, this will contain what problems prevented replacement. This is currently a tuple of the literal string 'cantreplace', ``provider``, ``other`` and the set of requirements that ``provider`` couldn't fulfill. :return: True if we can replace ``other`` with ``provider``, else False. 
t cantreplace( treqtsRRQR}RxRt frozensetRLRORRLRc( R3RRtothertproblemstrlistt unmatchedRNRFRP((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyttry_to_replaceos"       # cCsi|_i|_i|_i|_t|p0g}d|krk|jd|tdddgO}nt|tr|}}tj d|nK|j j |d|}}|dkrt d|ntj d|t|_t}t|g}t|g}x|r|j}|j} | |jkrO|j|n/|j| } | |kr~|j|| |n|j|jB} |j} t} ||krxAdD]6}d |}||kr| t|d |O} qqWn| | B| B}x|D]}|j|}|s+tj d||j j |d|}|dkrv| rv|j j |dt}n|dkrtj d||jd|fq+|j|j}}||f|jkr|j|n|j||| kr+||kr+|j|tj d|jq+nxw|D]o}|j} | |jkrr|jj|tj|q2|j| } | |kr2|j|| |q2q2WqWqWt|jj}x<|D]4}||k|_|jrtj d|jqqWtj d|||fS(s Find a distribution and all distributions it depends on. :param requirement: The requirement specifying the distribution to find, or a Distribution instance. :param meta_extras: A list of meta extras such as :test:, :build: and so on. :param prereleases: If ``True``, allow pre-release versions to be returned - otherwise, don't return prereleases unless they're all that's available. Return a set of :class:`Distribution` instances and a set of problems. The distributions returned should be such that they have the :attr:`required` attribute set to ``True`` if they were from the ``requirement`` passed to ``find()``, and they have the :attr:`build_time_dependency` attribute set to ``True`` unless they are post-installation dependencies of the ``requirement``. The problems should be a tuple consisting of the string ``'unsatisfied'`` and the requirement which couldn't be satisfied by any distribution known to the locator. 
s:*:s:test:s:build:s:dev:spassed %s as requirementRsUnable to locate %rs located %sttesttbuildtdevs:%s:s %s_requiressNo providers found for %rsCannot satisfy %rt unsatisfiedsAdding %s to install_distss#%s is a build-time dependency only.sfind done for %sN(R\R]R^(RJRHRGRURRMt isinstanceRRkRlRRR#RRct requestedRR:RLR[t run_requirest meta_requirestbuild_requirestgetattrRSRRxtname_and_versionRtvaluestbuild_time_dependency(R3Rt meta_extrasRRRRXttodot install_distsR[RWtireqtstsreqtstereqtsR:RQt all_reqtsRt providersRRtnRRKRH((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytfinds                      !       "  "   N( R<R=R>R#RIRLRORQRSR[RLRr(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyREs      ((ORtioRRtloggingRR`RRt ImportErrortdummy_threadingR$R,RtcompatRRRRRRRR R R R R1R RRRtdatabaseRRRRRtutilRRRRRRRRRRxRRRR R!t getLoggerR<RkRR|RRRR$R#R(R)tobjectRBRRRRR&R1R9R<RFRtNAME_VERSION_RERE(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytsV        d@ :0E:A&[    PK.e[㚳distlib/locators.pycnu[ abc@s&ddlZddlmZddlZddlZddlZddlZddlZyddlZWne k rddl ZnXddl Z ddl m Z ddlmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZddlm Z m!Z!m"Z"ddl#m$Z$ddl%m&Z&m'Z'm(Z(m)Z)m*Z*m+Z+m,Z,m-Z-m.Z.dd l/m0Z0m1Z1dd l2m3Z3m4Z4ej5e6Z7ej8d Z9ej8d ej:Z;ej8d Z<dZ=e>dZ?defdYZ@deAfdYZBdeBfdYZCdeBfdYZDdeAfdYZEdeBfdYZFdeBfdYZGdeBfdYZHd eBfd!YZId"eBfd#YZJeJeHeFd$d%d&d'd(ZKeKjLZLej8d)ZMd*eAfd+YZNdS(,iN(tBytesIOi(tDistlibException(turljointurlparset urlunparset url2pathnamet pathname2urltqueuetquotetunescapet string_typest build_openertHTTPRedirectHandlert text_typetRequestt HTTPErrortURLError(t DistributiontDistributionPatht make_dist(tMetadata( tcached_propertytparse_credentialst ensure_slashtsplit_filenametget_project_datatparse_requirementtparse_name_and_versiont ServerProxytnormalize_name(t get_schemetUnsupportedVersionError(tWheelt is_compatibles^(\w+)=([a-f0-9]+)s;\s*charset\s*=\s*(.*)\s*$stext/html|application/x(ht)?mlshttps://pypi.python.org/pypicCs1|dkrt}nt|dd}|jS(s 
Return all distribution names known by an index. :param url: The URL of the index. :return: A list of all known distribution names. ttimeoutg@N(tNonet DEFAULT_INDEXRt list_packages(turltclient((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytget_all_distribution_names)s  tRedirectHandlercBs%eZdZdZeZZZRS(sE A class to work around a bug in some Python 3.2.x releases. c Csd}x(dD] }||kr ||}Pq q W|dkrAdSt|}|jdkrt|j|}t|dr|j||q||| Clear any errors which may have been logged. N(RR(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt clear_errorsscCs|jjdS(N(RDtclear(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt clear_cachescCs|jS(N(t_scheme(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt _get_schemescCs ||_dS(N(RV(R3tvalue((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt _set_schemescCstddS(s= For a given project, get a dictionary mapping available versions to Distribution instances. This should be implemented in subclasses. If called from a locate() request, self.matcher will be set to a matcher for the requirement to satisfy, otherwise it will be None. s Please implement in the subclassN(tNotImplementedError(R3tname((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt _get_projects cCstddS(sJ Return all the distribution names known to this locator. s Please implement in the subclassN(RZ(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytget_distribution_namesscCsj|jdkr!|j|}nE||jkr@|j|}n&|j|j|}||j|<|S(s For a given project, get a dictionary mapping available versions to Distribution instances. This calls _get_project to do all the work, and just implements a caching layer on top. N(RDR#R\RS(R3R[RP((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt get_projects  cCsyt|}tj|j}t}|jd}|rTtt||j}n|j dkd|j k|||fS(su Give an url a score which can be used to choose preferred URLs for a given project release. 
s.whlthttpsspypi.python.org( Rt posixpathtbasenametpathtTruetendswithR!R t wheel_tagsR.tnetloc(R3R&ttRat compatibletis_wheel((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt score_urls cCs{|}|rw|j|}|j|}||kr?|}n||kratjd||qwtjd||n|S(s{ Choose one of two URLs where both are candidates for distribution archives for the same version of a distribution (for example, .tar.gz vs. zip). The current implementation favours https:// URLs over http://, archives from PyPI over those from other locations, wheel compatibility (if a wheel) and then the archive name. sNot replacing %r with %rsReplacing %r with %r(Rjtloggertdebug(R3turl1turl2RPts1ts2((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt prefer_urls    cCs t||S(sZ Attempt to split a filename in project name, version and Python version. (R(R3tfilenamet project_name((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRsc Csd}d}t|\}}}}} } | jjdrXtjd|| ntj| } | r| j\} } n d\} } |}|r|ddkr|d }n|j dryt |}t ||j r|dkrt }n||j|}|ri|jd6|jd6|jd 6t||||| d fd 6d jg|jD]}d jt|d^qdd6}qnWqtk r}tjd|qXn|j |jrtj|}}x|jD]}|j |r|t| }|j||}|s@tjd|nu|\}}}| se|||ri|d6|d6|d 6t||||| d fd 6}|r||d= 1.0, < 2.0, != 1.3)' :param prereleases: If ``True``, allow pre-release versions to be located. Otherwise, pre-release versions are not returned. :return: A :class:`Distribution` instance, or ``None`` if no such distribution could be located. 
sNot a valid requirement: %rsmatcher: %s (%s)iRRs%s did not match %rs%skipping pre-release version %s of %sserror matching %s with %riR:ssorted list: %siN(RR(R#RRRR.RFt requirementRkRlttypeR<R^R[Rt version_classR}t is_prereleaseRMRRtsortedR:textrasRKRt download_urlsR(R3Rt prereleasesRPtrR.RFtversionstslisttvclstkRxtdtsdR&((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytlocatePsT          $   (s.tar.gzs.tar.bz2s.tars.zips.tgzs.tbz(s.eggs.exes.whl(s.pdfN(s.whl(R<R=R>tsource_extensionstbinary_extensionstexcluded_extensionsR#ReRRIRRRSRURWRYtpropertyR.R\R]R^RjRqRRRRRLR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRBSs.             F  tPyPIRPCLocatorcBs)eZdZdZdZdZRS(s This locator uses XML-RPC to locate distributions. It therefore cannot be used with simple mirrors (that only mirror file content). cKs8tt|j|||_t|dd|_dS(s Initialise an instance. :param url: The URL to use for XML-RPC. :param kwargs: Passed to the superclass constructor. R"g@N(tsuperRRItbase_urlRR'(R3R&tkwargs((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIs cCst|jjS(sJ Return all the distribution names known to this locator. (RR'R%(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR]sc Csviid6id6}|jj|t}xF|D]>}|jj||}|jj||}td|j}|d|_|d|_|j d|_ |j dg|_ |j d|_ t |}|r0|d } | d |_|j| |_||_|||RIR]R\(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRs tPyPIJSONLocatorcBs)eZdZdZdZdZRS(sw This locator uses PyPI's JSON interface. It's very limited in functionality and probably not worth using. cKs)tt|j|t||_dS(N(RRRIRR(R3R&R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIscCstddS(sJ Return all the distribution names known to this locator. 
sNot available from this locatorN(RZ(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR]scCsiid6id6}t|jdt|}yE|jj|}|jj}tj|}t d|j }|d}|d|_ |d|_ |j d|_|j d g|_|j d |_t|}||_|d} |||j RIR]R\(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRs  tPagecBszeZdZejdejejBejBZejdejejBZ dZ ejdejZ e dZ RS(s4 This class represents a scraped HTML page. s (rel\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s ]*))\s+)? href\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s ]*)) (\s+rel\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s ]*)))? s!]+)cCsM||_||_|_|jj|j}|rI|jd|_ndS(sk Initialise an instance with the Unicode page contents and the URL they came from. iN(RRR&t_basetsearchtgroup(R3RR&R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIs  s[^a-z0-9$&+,/:;=?@.#%_\\|-]cCsd}t}x|jj|jD]}|jd}|dpv|dpv|dpv|dpv|dpv|d}|d p|d p|d }t|j|}t|}|jj d |}|j ||fq(Wt |d ddt }|S(s Return the URLs of all the links on a page together with information about their "rel" attribute, for determining which ones to treat as downloads and which ones to queue for further scraping. cSs@t|\}}}}}}t||t||||fS(sTidy up an URL.(RRR(R&R.RfRbRRR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytclean%sR,trel1trel2trel3trel4trel5trel6RmRnturl3cSsdt|jdS(Ns%%%2xi(tordR(R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt3R,R:cSs|dS(Ni((Rg((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR7R,treverse( Rt_hreftfinditerRt groupdictRRR t _clean_retsubRRRc(R3RRPR}RtrelR&((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytlinkss   (R<R=R>tretcompiletItStXRRRIRRR(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRs tSimpleScrapingLocatorcBseZdZiejd6dd6dd6ZdddZdZd Z d Z e j d e j Zd Zd ZdZdZdZe j dZdZRS(s A locator which scrapes HTML pages to locate downloads for a distribution. 
This runs multiple threads to do the I/O; performance is at least as good as pip's PackageFinder, which works in an analogous fashion. tdeflatecCstjdttjS(Ntfileobj(tgziptGzipFileRRR(tb((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRER,RcCs|S(N((R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRFR,tnonei cKstt|j|t||_||_i|_t|_t j |_ t|_ t |_||_tj|_tj|_dS(s Initialise an instance. :param url: The root URL to use for scraping. :param timeout: The timeout, in seconds, to be applied to requests. This defaults to ``None`` (no timeout specified). :param num_workers: The number of worker threads you want to do I/O, This defaults to 10. :param kwargs: Passed to the superclass. N(RRRIRRR"t _page_cacheRt_seenRRGt _to_fetcht _bad_hostsRLtskip_externalst num_workerst threadingtRLockt_lockt_gplock(R3R&R"RR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIIs       cCscg|_xSt|jD]B}tjd|j}|jt|j|jj |qWdS(s Threads are created only when get_project is called, and terminate before it returns. They are there primarily to parallelise I/O (i.e. fetching web pages). ttargetN( t_threadstrangeRRtThreadt_fetcht setDaemonRctstartRM(R3tiRg((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt_prepare_threadscs    cCsOx!|jD]}|jjdq Wx|jD]}|jq.Wg|_dS(su Tell all the threads to terminate (by sending a sentinel value) and wait for them to do so. N(RRRR#R(R3Rg((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt _wait_threadsps c Csiid6id6}|j||_||_t|jdt|}|jj|jj|j z1t j d||j j ||j jWd|jX|`WdQX|S(NRRs%s/s Queueing %s(RRPRsRRRRRTRRRkRlRRRR(R3R[RPR&((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR\}s        s<\b(linux-(i\d86|x86_64|arm\w+)|win(32|-amd64)|macosx-?\d+)\bcCs|jj|S(sD Does an URL refer to a platform-specific download? 
(tplatform_dependentR(R3R&((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt_is_platform_dependentscCsp|j|rd}n|j||j}tjd|||rl|j|j|j|WdQXn|S(s% See if an URL is a suitable download for a project. If it is, register information in the result dictionary (for _get_project) about the specific version it's for. Note that the return value isn't actually used other than as a boolean value. sprocess_download: %s -> %sN( RR#RRsRkRlRRRP(R3R&R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt_process_downloads   c Cst|\}}}}}}|j|j|j|jrGt}n|jrl|j|j rlt}n|j|jst}ny|d krt}nd|d krt}nO|j |rt}n7|j ddd} | j d krt}nt }t jd |||||S( s Determine whether a link URL from a referring page and with a particular "rel" attribute should be queued for scraping. thomepagetdownloadthttpR_tftpt:iit localhosts#should_queue: %s (%s) from %s -> %s(RR (R R_R (RRdRRRRLRR{RRtsplitRzRcRkRl( R3tlinktreferrerRR.RfRbt_RPthost((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyt _should_queues*           cCs xtr|jj}zy|r|j|}|dkrEwnx|jD]y\}}||jkrO|jj||j| r|j |||rt j d|||jj |qqOqOWnWn)t k r}|jj t|nXWd|jjX|sPqqWdS(s Get a URL to fetch from the work queue, get the HTML page, examine its links for download candidates and candidates for further scraping. This is a handy method to run in a thread. sQueueing %s from %sN(RcRRKtget_pageR#RRRRRRkRlRRRHR RO(R3R&tpageRRRQ((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRs(  !cCst|\}}}}}}|dkrZtjjt|rZtt|d}n||jkr|j|}tj d||nK|j ddd}d}||j krtj d||n t |did d 6}zy7tj d ||jj|d |j} tj d || j} | jdd} tj| r| j} | j} | jd}|r|j|}|| } nd}tj| }|r|jd}ny| j|} Wn tk r| jd} nXt| | }||j| ]*>([^<]+)tzlibt decompressRR#RIRRR\RRRRRRRRRR#R](((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR;s$           ;tDirectoryLocatorcBs2eZdZdZdZdZdZRS(s? This class locates distributions in a directory tree. cKso|jdt|_tt|j|tjj|}tjj |sbt d|n||_ dS(s Initialise an instance. 
:param path: The root of the directory tree to search. :param kwargs: Passed to the superclass constructor, except for: * recursive - if True (the default), subdirectories are recursed into. If False, only the top-level directory is searched, t recursivesNot a directory: %rN( RRcR'RR&RIRRbtabspathRRtbase_dir(R3RbR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRI5s cCs|j|jS(s Should a filename be considered as a candidate for a distribution archive? As well as the filename, the directory which contains it is provided, though not used by the current implementation. (RdR(R3Rrtparent((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytshould_includeFsc Csiid6id6}xtj|jD]\}}}x|D]}|j||r=tjj||}tddttjj|dddf}|j ||}|r|j ||qq=q=W|j s'Pq'q'W|S(NRRRR,( RtwalkR)R+RbRRRR(RRR'( R3R[RPtroottdirstfilestfnR&R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR\Ns"   c Cst}xtj|jD]\}}}x|D]}|j||r2tjj||}tddttjj |dddf}|j |d}|r|j |dqq2q2W|j sPqqW|S(sJ Return all the distribution names known to this locator. RR,R[N(RRR,R)R+RbRRRR(RR#RR'(R3RPR-R.R/R0R&R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR]^s "   (R<R=R>RIR+R\R](((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR&0s    t JSONLocatorcBs eZdZdZdZRS(s This locator uses special extended metadata (not available on PyPI) and is the basis of performant dependency resolution in distlib. Other locators require archive downloads before dependencies can be determined! As you might imagine, that can be slow. cCstddS(sJ Return all the distribution names known to this locator. 
sNot available from this locatorN(RZ(R3((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR]xscCsBiid6id6}t|}|r>x|jdgD]}|ddks9|ddkreq9nt|d|d d |jd d d |j}|j}|d |_d|kr|drd|df|_n|jdi|_|jdi|_|||j <|dj |j t j |d q9Wn|S(NRRR/tptypetsdistt pyversiontsourceR[RxRsPlaceholder for summaryR.R&RRt requirementstexports( RRKRR.RRRt dependenciesR7RxRRR(R3R[RPRRRR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR\~s&        .(R<R=R>R]R\(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR1qs tDistPathLocatorcBs eZdZdZdZRS(s This locator finds installed distributions in a path. It can be useful for adding to an :class:`AggregatingLocator`. cKs8tt|j|t|ts+t||_dS(ss Initialise an instance. :param distpath: A :class:`DistributionPath` instance to search. N(RR9RIt isinstanceRtAssertionErrortdistpath(R3R<R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIscCs|jj|}|dkr5iid6id6}nGi||j6it|jg|j6d6itdg|j6d6}|S(NRR(R<tget_distributionR#RxRR(R3R[RRP((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR\s  (R<R=R>RIR\(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR9s tAggregatingLocatorcBsPeZdZdZdZdZeejj eZdZ dZ RS(sI This class allows you to chain and/or merge a list of locators. cOs8|jdt|_||_tt|j|dS(s Initialise an instance. :param locators: The list of locators to search. :param kwargs: Passed to the superclass constructor, except for: * merge - if False (the default), the first successful search from any of the locators is returned. If True, the results from all locators are merged (this can be slow). 
tmergeN(RRLR?tlocatorsRR>RI(R3R@R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIs  cCs5tt|jx|jD]}|jqWdS(N(RR>RUR@(R3R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRUscCs*||_x|jD]}||_qWdS(N(RVR@R.(R3RXR((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRYs c Cs]i}xP|jD]E}|j|}|r|jr|jdi}|jdi}|j||jd}|r|rxF|jD]5\}} ||kr||c| ORIRURYRRBR.tfgetR\R](((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyR>s    ,shttps://pypi.python.org/simple/R"g@R.tlegacys1(?P[\w-]+)\s*\(\s*(==\s*)?(?P[^)]+)\)$tDependencyFindercBsVeZdZddZdZdZdZdZdZ de dZ RS( s0 Locate dependencies for distributions. cCs(|p t|_t|jj|_dS(sf Initialise an instance, using the specified locator to locate distributions. N(tdefault_locatorRRR.(R3R((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRIscCstjd||j}||j|<||j||jf= 1.0) while C requires (B >= 1.1). For successful replacement, ``provider`` must meet all the requirements which ``other`` fulfills. :param provider: The provider we are trying to replace with. :param other: The provider we're trying to replace. :param problems: If False is returned, this will contain what problems prevented replacement. This is currently a tuple of the literal string 'cantreplace', ``provider``, ``other`` and the set of requirements that ``provider`` couldn't fulfill. :return: True if we can replace ``other`` with ``provider``, else False. 
t cantreplace( treqtsRRSR}RxRt frozensetRLRQRRNRc( R3RTtothertproblemstrlistt unmatchedRPRFRP((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyttry_to_replaceos"       # cCsi|_i|_i|_i|_t|p0g}d|krk|jd|tdddgO}nt|tr|}}tj d|nK|j j |d|}}|dkrt d|ntj d|t|_t}t|g}t|g}x|r|j}|j} | |jkrO|j|n/|j| } | |kr~|j|| |n|j|jB} |j} t} ||krxAdD]6}d |}||kr| t|d |O} qqWn| | B| B}x|D]}|j|}|s+tj d||j j |d|}|dkrv| rv|j j |dt}n|dkrtj d||jd|fq+|j|j}}||f|jkr|j|n|j||| kr+||kr+|j|tj d|jq+nxw|D]o}|j} | |jkrr|jj|tj|q2|j| } | |kr2|j|| |q2q2WqWqWt|jj}x<|D]4}||k|_|jrtj d|jqqWtj d|||fS(s Find a distribution and all distributions it depends on. :param requirement: The requirement specifying the distribution to find, or a Distribution instance. :param meta_extras: A list of meta extras such as :test:, :build: and so on. :param prereleases: If ``True``, allow pre-release versions to be returned - otherwise, don't return prereleases unless they're all that's available. Return a set of :class:`Distribution` instances and a set of problems. The distributions returned should be such that they have the :attr:`required` attribute set to ``True`` if they were from the ``requirement`` passed to ``find()``, and they have the :attr:`build_time_dependency` attribute set to ``True`` unless they are post-installation dependencies of the ``requirement``. The problems should be a tuple consisting of the string ``'unsatisfied'`` and the requirement which couldn't be satisfied by any distribution known to the locator. 
s:*:s:test:s:build:s:dev:spassed %s as requirementRsUnable to locate %rs located %sttesttbuildtdevs:%s:s %s_requiressNo providers found for %rsCannot satisfy %rt unsatisfiedsAdding %s to install_distss#%s is a build-time dependency only.sfind done for %sN(R^R_R`(RLRJRIRWRROR:RRkRlRRR#RRct requestedRR:RNR]t run_requirest meta_requirestbuild_requirestgetattrRURRxtname_and_versionRtvaluestbuild_time_dependency(R3Rt meta_extrasRRRRZttodot install_distsR[RYtireqtstsreqtstereqtsR:RQt all_reqtsRt providersRTtnRRMRJ((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytfinds                      !       "  "   N( R<R=R>R#RIRNRQRSRUR]RLRs(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyRGs      ((ORtioRRtloggingRR`RRt ImportErrortdummy_threadingR$R,RtcompatRRRRRRRR R R R R1R RRRtdatabaseRRRRRtutilRRRRRRRRRRxRRRR R!t getLoggerR<RkRR|RRRR$R#R(R)tobjectRBRRRRR&R1R9R>RHRtNAME_VERSION_RERG(((s@/usr/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pytsV        d@ :0E:A&[    PK.e[_ s`aadistlib/compat.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2013-2016 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. 
# from __future__ import absolute_import import os import re import sys try: import ssl except ImportError: ssl = None if sys.version_info[0] < 3: # pragma: no cover from StringIO import StringIO string_types = basestring, text_type = unicode from types import FileType as file_type import __builtin__ as builtins import ConfigParser as configparser from ._backport import shutil from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, pathname2url, ContentTooShortError, splittype) def quote(s): if isinstance(s, unicode): s = s.encode('utf-8') return _quote(s) import urllib2 from urllib2 import (Request, urlopen, URLError, HTTPError, HTTPBasicAuthHandler, HTTPPasswordMgr, HTTPHandler, HTTPRedirectHandler, build_opener) if ssl: from urllib2 import HTTPSHandler import httplib import xmlrpclib import Queue as queue from HTMLParser import HTMLParser import htmlentitydefs raw_input = raw_input from itertools import ifilter as filter from itertools import ifilterfalse as filterfalse _userprog = None def splituser(host): """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" global _userprog if _userprog is None: import re _userprog = re.compile('^(.*)@(.*)$') match = _userprog.match(host) if match: return match.group(1, 2) return None, host else: # pragma: no cover from io import StringIO string_types = str, text_type = str from io import TextIOWrapper as file_type import builtins import configparser import shutil from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote, unquote, urlsplit, urlunsplit, splittype) from urllib.request import (urlopen, urlretrieve, Request, url2pathname, pathname2url, HTTPBasicAuthHandler, HTTPPasswordMgr, HTTPHandler, HTTPRedirectHandler, build_opener) if ssl: from urllib.request import HTTPSHandler from urllib.error import HTTPError, URLError, ContentTooShortError import http.client as httplib import 
urllib.request as urllib2 import xmlrpc.client as xmlrpclib import queue from html.parser import HTMLParser import html.entities as htmlentitydefs raw_input = input from itertools import filterfalse filter = filter try: from ssl import match_hostname, CertificateError except ImportError: # pragma: no cover class CertificateError(ValueError): pass def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False parts = dn.split('.') leftmost, remainder = parts[0], parts[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. 
www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname) def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError("empty or no certificate, match_hostname needs a " "SSL socket or SSL context with either " "CERT_OPTIONAL or CERT_REQUIRED") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. 
if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found") try: from types import SimpleNamespace as Container except ImportError: # pragma: no cover class Container(object): """ A generic container for when multiple values need to be returned """ def __init__(self, **kwargs): self.__dict__.update(kwargs) try: from shutil import which except ImportError: # pragma: no cover # Implementation from Python 3.3 def which(cmd, mode=os.F_OK | os.X_OK, path=None): """Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path. """ # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. def _access_check(fn, mode): return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) # If we're given a path with a directory part, look it up directly rather # than referring to PATH directories. This includes checking relative to the # current directory, e.g. ./script if os.path.dirname(cmd): if _access_check(cmd, mode): return cmd return None if path is None: path = os.environ.get("PATH", os.defpath) if not path: return None path = path.split(os.pathsep) if sys.platform == "win32": # The current directory takes precedence on Windows. if not os.curdir in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. 
pathext = os.environ.get("PATHEXT", "").split(os.pathsep) # See if the given file matches any of the expected path extensions. # This will allow us to short circuit when given "python.exe". # If it does match, only test that one, otherwise we have to try # others. if any(cmd.lower().endswith(ext.lower()) for ext in pathext): files = [cmd] else: files = [cmd + ext for ext in pathext] else: # On other platforms you don't have things like PATHEXT to tell you # what file suffixes are executable, so just pass on cmd as-is. files = [cmd] seen = set() for dir in path: normdir = os.path.normcase(dir) if not normdir in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) if _access_check(name, mode): return name return None # ZipFile is a context manager in 2.7, but not in 2.6 from zipfile import ZipFile as BaseZipFile if hasattr(BaseZipFile, '__enter__'): # pragma: no cover ZipFile = BaseZipFile else: from zipfile import ZipExtFile as BaseZipExtFile class ZipExtFile(BaseZipExtFile): def __init__(self, base): self.__dict__.update(base.__dict__) def __enter__(self): return self def __exit__(self, *exc_info): self.close() # return None, so if an exception occurred, it will propagate class ZipFile(BaseZipFile): def __enter__(self): return self def __exit__(self, *exc_info): self.close() # return None, so if an exception occurred, it will propagate def open(self, *args, **kwargs): base = BaseZipFile.open(self, *args, **kwargs) return ZipExtFile(base) try: from platform import python_implementation except ImportError: # pragma: no cover def python_implementation(): """Return a string identifying the Python implementation.""" if 'PyPy' in sys.version: return 'PyPy' if os.name == 'java': return 'Jython' if sys.version.startswith('IronPython'): return 'IronPython' return 'CPython' try: import sysconfig except ImportError: # pragma: no cover from ._backport import sysconfig try: callable = callable except NameError: # pragma: no cover from collections 
import Callable def callable(obj): return isinstance(obj, Callable) try: fsencode = os.fsencode fsdecode = os.fsdecode except AttributeError: # pragma: no cover _fsencoding = sys.getfilesystemencoding() if _fsencoding == 'mbcs': _fserrors = 'strict' else: _fserrors = 'surrogateescape' def fsencode(filename): if isinstance(filename, bytes): return filename elif isinstance(filename, text_type): return filename.encode(_fsencoding, _fserrors) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__) def fsdecode(filename): if isinstance(filename, text_type): return filename elif isinstance(filename, bytes): return filename.decode(_fsencoding, _fserrors) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__) try: from tokenize import detect_encoding except ImportError: # pragma: no cover from codecs import BOM_UTF8, lookup import re cookie_re = re.compile("coding[:=]\s*([-\w.]+)") def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. enc = orig_enc[:12].lower().replace("_", "-") if enc == "utf-8" or enc.startswith("utf-8-"): return "utf-8" if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): return "iso-8859-1" return orig_enc def detect_encoding(readline): """ The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argument, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. If the encoding cookie is an invalid charset, raise a SyntaxError. 
Note that if a utf-8 bom is found, 'utf-8-sig' is returned. If no encoding is specified, then the default of 'utf-8' will be returned. """ try: filename = readline.__self__.name except AttributeError: filename = None bom_found = False encoding = None default = 'utf-8' def read_or_stop(): try: return readline() except StopIteration: return b'' def find_cookie(line): try: # Decode as UTF-8. Either the line is an encoding declaration, # in which case it should be pure ASCII, or it must be UTF-8 # per default encoding. line_string = line.decode('utf-8') except UnicodeDecodeError: msg = "invalid or missing encoding declaration" if filename is not None: msg = '{} for {!r}'.format(msg, filename) raise SyntaxError(msg) matches = cookie_re.findall(line_string) if not matches: return None encoding = _get_normal_name(matches[0]) try: codec = lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter if filename is None: msg = "unknown encoding: " + encoding else: msg = "unknown encoding for {!r}: {}".format(filename, encoding) raise SyntaxError(msg) if bom_found: if codec.name != 'utf-8': # This behaviour mimics the Python interpreter if filename is None: msg = 'encoding problem: utf-8' else: msg = 'encoding problem for {!r}: utf-8'.format(filename) raise SyntaxError(msg) encoding += '-sig' return encoding first = read_or_stop() if first.startswith(BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default, [] encoding = find_cookie(first) if encoding: return encoding, [first] second = read_or_stop() if not second: return default, [first] encoding = find_cookie(second) if encoding: return encoding, [first, second] return default, [first, second] # For converting & <-> & etc. 
try: from html import escape except ImportError: from cgi import escape if sys.version_info[:2] < (3, 4): unescape = HTMLParser().unescape else: from html import unescape try: from collections import ChainMap except ImportError: # pragma: no cover from collections import MutableMapping try: from reprlib import recursive_repr as _recursive_repr except ImportError: def _recursive_repr(fillvalue='...'): ''' Decorator to make a repr function return fillvalue for a recursive call ''' def decorating_function(user_function): repr_running = set() def wrapper(self): key = id(self), get_ident() if key in repr_running: return fillvalue repr_running.add(key) try: result = user_function(self) finally: repr_running.discard(key) return result # Can't use functools.wraps() here because of bootstrap issues wrapper.__module__ = getattr(user_function, '__module__') wrapper.__doc__ = getattr(user_function, '__doc__') wrapper.__name__ = getattr(user_function, '__name__') wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) return wrapper return decorating_function class ChainMap(MutableMapping): ''' A ChainMap groups multiple dicts (or other mappings) together to create a single, updateable view. The underlying mappings are stored in a list. That list is public and can accessed or updated using the *maps* attribute. There is no other state. Lookups search the underlying mappings successively until a key is found. In contrast, writes, updates, and deletions only operate on the first mapping. ''' def __init__(self, *maps): '''Initialize a ChainMap by setting *maps* to the given mappings. If no mappings are provided, a single empty dictionary is used. 
''' self.maps = list(maps) or [{}] # always at least one map def __missing__(self, key): raise KeyError(key) def __getitem__(self, key): for mapping in self.maps: try: return mapping[key] # can't use 'key in mapping' with defaultdict except KeyError: pass return self.__missing__(key) # support subclasses that define __missing__ def get(self, key, default=None): return self[key] if key in self else default def __len__(self): return len(set().union(*self.maps)) # reuses stored hash values if possible def __iter__(self): return iter(set().union(*self.maps)) def __contains__(self, key): return any(key in m for m in self.maps) def __bool__(self): return any(self.maps) @_recursive_repr() def __repr__(self): return '{0.__class__.__name__}({1})'.format( self, ', '.join(map(repr, self.maps))) @classmethod def fromkeys(cls, iterable, *args): 'Create a ChainMap with a single dict created from the iterable.' return cls(dict.fromkeys(iterable, *args)) def copy(self): 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' return self.__class__(self.maps[0].copy(), *self.maps[1:]) __copy__ = copy def new_child(self): # like Django's Context.push() 'New ChainMap with a new dict followed by all previous maps.' return self.__class__({}, *self.maps) @property def parents(self): # like Django's Context.pop() 'New ChainMap from maps[1:].' return self.__class__(*self.maps[1:]) def __setitem__(self, key, value): self.maps[0][key] = value def __delitem__(self, key): try: del self.maps[0][key] except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key)) def popitem(self): 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' try: return self.maps[0].popitem() except KeyError: raise KeyError('No keys found in the first mapping.') def pop(self, key, *args): 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' 
try: return self.maps[0].pop(key, *args) except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key)) def clear(self): 'Clear maps[0], leaving maps[1:] intact.' self.maps[0].clear() try: from imp import cache_from_source except ImportError: # pragma: no cover def cache_from_source(path, debug_override=None): assert path.endswith('.py') if debug_override is None: debug_override = __debug__ if debug_override: suffix = 'c' else: suffix = 'o' return path + suffix try: from collections import OrderedDict except ImportError: # pragma: no cover ## {{{ http://code.activestate.com/recipes/576693/ (r9) # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. # Passes Python2.7's test suite and incorporates all the latest updates. try: from thread import get_ident as _get_ident except ImportError: from dummy_thread import get_ident as _get_ident try: from _abcoll import KeysView, ValuesView, ItemsView except ImportError: pass class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as for regular dictionaries. # The internal self.__map dictionary maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. 
''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link which goes at the end of the linked # list, and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev def __iter__(self): 'od.__iter__() <==> iter(od)' root = self.__root curr = root[1] while curr is not root: yield curr[2] curr = curr[1] def __reversed__(self): 'od.__reversed__() <==> reversed(od)' root = self.__root curr = root[0] while curr is not root: yield curr[2] curr = curr[0] def clear(self): 'od.clear() -> None. Remove all items from od.' try: for node in self.__map.itervalues(): del node[:] root = self.__root root[:] = [root, root, None] self.__map.clear() except AttributeError: pass dict.clear(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. 
''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root[0] link_prev = link[0] link_prev[1] = root root[0] = link_prev else: link = root[1] link_next = link[1] root[1] = link_next link_next[0] = root key = link[2] del self.__map[key] value = dict.pop(self, key) return key, value # -- the following methods do not depend on the internal structure -- def keys(self): 'od.keys() -> list of keys in od' return list(self) def values(self): 'od.values() -> list of values in od' return [self[key] for key in self] def items(self): 'od.items() -> list of (key, value) pairs in od' return [(key, self[key]) for key in self] def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self) def itervalues(self): 'od.itervalues -> an iterator over the values in od' for k in self: yield self[k] def iteritems(self): 'od.iteritems -> an iterator over the (key, value) items in od' for k in self: yield (k, self[k]) def update(*args, **kwds): '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v ''' if len(args) > 2: raise TypeError('update() takes at most 2 positional ' 'arguments (%d given)' % (len(args),)) elif not args: raise TypeError('update() takes at least 1 argument (0 given)') self = args[0] # Make progressively weaker assumptions about "other" other = () if len(args) == 2: other = args[1] if isinstance(other, dict): for key in other: self[key] = other[key] elif hasattr(other, 'keys'): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value __update = update # let subclasses override update without breaking __init__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default def __repr__(self, _repr_running=None): 'od.__repr__() <==> repr(od)' if not _repr_running: _repr_running = {} call_key = id(self), _get_ident() if call_key in _repr_running: return '...' 
_repr_running[call_key] = 1 try: if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and values equal to v (which defaults to None). ''' d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return len(self)==len(other) and self.items() == other.items() return dict.__eq__(self, other) def __ne__(self, other): return not self == other # -- the following methods are only used in Python 2.7 -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" return KeysView(self) def viewvalues(self): "od.viewvalues() -> an object providing a view on od's values" return ValuesView(self) def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self) try: from logging.config import BaseConfigurator, valid_ident except ImportError: # pragma: no cover IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) def valid_ident(s): m = IDENTIFIER.match(s) if not m: raise ValueError('Not a valid Python identifier: %r' % s) return True # The ConvertingXXX classes are wrappers around standard Python containers, # and they serve to convert any suitable values in the container. 
The # conversion converts base dicts, lists and tuples to their wrapped # equivalents, whereas strings which match a conversion format are converted # appropriately. # # Each wrapper should have a configurator attribute holding the actual # configurator to use for conversion. class ConvertingDict(dict): """A converting dictionary wrapper.""" def __getitem__(self, key): value = dict.__getitem__(self, key) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def get(self, key, default=None): value = dict.get(self, key, default) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def pop(self, key, default=None): value = dict.pop(self, key, default) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result class ConvertingList(list): """A converting list wrapper.""" def __getitem__(self, key): value = list.__getitem__(self, key) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def pop(self, idx=-1): value = list.pop(self, idx) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self return result class ConvertingTuple(tuple): """A converting tuple wrapper.""" def __getitem__(self, key): value = 
tuple.__getitem__(self, key) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result class BaseConfigurator(object): """ The configurator base class which defines some useful defaults. """ CONVERT_PATTERN = re.compile(r'^(?P[a-z]+)://(?P.*)$') WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') DIGIT_PATTERN = re.compile(r'^\d+$') value_converters = { 'ext' : 'ext_convert', 'cfg' : 'cfg_convert', } # We might want to use a different one, e.g. importlib importer = staticmethod(__import__) def __init__(self, config): self.config = ConvertingDict(config) self.config.configurator = self def resolve(self, s): """ Resolve strings to objects using standard import and attribute syntax. """ name = s.split('.') used = name.pop(0) try: found = self.importer(used) for frag in name: used += '.' 
+ frag try: found = getattr(found, frag) except AttributeError: self.importer(used) found = getattr(found, frag) return found except ImportError: e, tb = sys.exc_info()[1:] v = ValueError('Cannot resolve %r: %s' % (s, e)) v.__cause__, v.__traceback__ = e, tb raise v def ext_convert(self, value): """Default converter for the ext:// protocol.""" return self.resolve(value) def cfg_convert(self, value): """Default converter for the cfg:// protocol.""" rest = value m = self.WORD_PATTERN.match(rest) if m is None: raise ValueError("Unable to convert %r" % value) else: rest = rest[m.end():] d = self.config[m.groups()[0]] #print d, rest while rest: m = self.DOT_PATTERN.match(rest) if m: d = d[m.groups()[0]] else: m = self.INDEX_PATTERN.match(rest) if m: idx = m.groups()[0] if not self.DIGIT_PATTERN.match(idx): d = d[idx] else: try: n = int(idx) # try as number first (most likely) d = d[n] except TypeError: d = d[idx] if m: rest = rest[m.end():] else: raise ValueError('Unable to convert ' '%r at %r' % (value, rest)) #rest should be empty return d def convert(self, value): """ Convert values to an appropriate type. dicts, lists and tuples are replaced by their converting alternatives. Strings are checked to see if they have a conversion format and are converted if they do. 
""" if not isinstance(value, ConvertingDict) and isinstance(value, dict): value = ConvertingDict(value) value.configurator = self elif not isinstance(value, ConvertingList) and isinstance(value, list): value = ConvertingList(value) value.configurator = self elif not isinstance(value, ConvertingTuple) and\ isinstance(value, tuple): value = ConvertingTuple(value) value.configurator = self elif isinstance(value, string_types): m = self.CONVERT_PATTERN.match(value) if m: d = m.groupdict() prefix = d['prefix'] converter = self.value_converters.get(prefix, None) if converter: suffix = d['suffix'] converter = getattr(self, converter) value = converter(suffix) return value def configure_custom(self, config): """Configure an object with a user-supplied factory.""" c = config.pop('()') if not callable(c): c = self.resolve(c) props = config.pop('.', None) # Check for valid identifiers kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) result = c(**kwargs) if props: for name, value in props.items(): setattr(result, name, value) return result def as_tuple(self, value): """Utility function which converts lists to tuples.""" if isinstance(value, list): value = tuple(value) return value PK.e["ggdistlib/version.pycnu[ abc @srdZddlZddlZddlmZddddd d d d gZejeZd e fd YZ de fdYZ de fdYZ ejdZdZeZde fdYZdZde fdYZejddfejddfejddfejddfejddfejd dfejd!d"fejd#d$fejd%d&fejd'd(ff Zejd)dfejd*dfejd+d"fejd!d"fejd,dffZejd-Zd.Zd/Zejd0ejZid1d26d1d36d4d56d1d66d7d86dd6dd"6Zd9Zde fd:YZde fd;YZ ejd<ejZ!d=Z"d>Z#d e fd?YZ$d e fd@YZ%dAe fdBYZ&ie&eeedC6e&ee dDdE6e&e#e%edF6Z'e'dCe'dGtt|dksVtdS(Ni(tstript_stringtparset_partst isinstancettupletAssertionErrortlen(tselftstparts((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt__init__scCstddS(Nsplease implement in a subclass(tNotImplementedError(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR$scCs5t|t|kr1td||fndS(Nscannot compare %r and %r(ttypet 
TypeError(Rtother((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt_check_compatible'scCs|j||j|jkS(N(RR(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt__eq__+s cCs|j| S(N(R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt__ne__/scCs|j||j|jkS(N(RR(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt__lt__2s cCs|j|p|j| S(N(R R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt__gt__6scCs|j|p|j|S(N(R R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt__le__9scCs|j|p|j|S(N(R!R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt__ge__<scCs t|jS(N(thashR(R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt__hash__@scCsd|jj|jfS(Ns%s('%s')(t __class__R R(R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt__repr__CscCs|jS(N(R(R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt__str__FscCstddS(NsPlease implement in subclasses.(R(R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt is_prereleaseIs(R R RRRRRR R!R"R#R%R'R(tpropertyR)(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR s            tMatchercBseZdZejdZejdZejdZidd6dd6dd6d d 6d d 6d d6dd6dd6Z dZ dZ e dZ dZdZdZdZdZdZRS(s^(\w[\s\w'.-]*)(\((.*)\))?s'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$s ^\d+(\.\d+)*$cCs ||kS(N((tvtctp((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pytWttcCs||kp||kS(N((R,R-R.((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR/YR0s<=cCs||kp||kS(N((R,R-R.((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR/ZR0s>=cCs ||kS(N((R,R-R.((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR/[R0s==cCs ||kS(N((R,R-R.((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR/\R0s===cCs||kp||kS(N((R,R-R.((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR/^R0s~=cCs 
||kS(N((R,R-R.((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR/_R0s!=c Cs|jdkrtdn|j|_}|jj|}|s\td|n|jd}|dj|_|jj |_ g}|drg|dj dD]}|j^q}x|D]}|j j|}|s td||fn|j}|dp#d}|d }|j d r|dkr^td |n|d t}} |jj|s|j|qn|j|t}} |j||| fqWnt||_dS(NsPlease specify a version classs Not valid: %rR0iit,sInvalid %r in %rs~=is.*s==s!=s#'.*' not allowed for %r constraintsi(s==s!=(t version_classtNonet ValueErrorR Rtdist_retmatchtgroupstnametlowertkeytsplittcomp_retendswithtTruetnum_retFalsetappendRR( RRtmR9tclistR-t constraintstoptvntprefix((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRbs: ,     cCst|tr!|j|}nx|jD]\}}}|jj|}t|trmt||}n|sd||jjf}t |n||||s+t Sq+Wt S(s Check if the provided version matches the constraints. :param version: The version to match against this instance. :type version: String or :class:`Version` instance. s%r not implemented for %s( RRR4Rt _operatorstgettgetattrR&R RRBR@(Rtversiontoperatort constraintRItftmsg((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR8scCsJd}t|jdkrF|jdddkrF|jdd}n|S(Niis==s===(s==s===(R5RR(Rtresult((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt exact_versions,cCsGt|t|ks*|j|jkrCtd||fndS(Nscannot compare %s and %s(RR:R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRs*cCs/|j||j|jko.|j|jkS(N(RR<R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRs cCs|j| S(N(R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRscCst|jt|jS(N(R$R<R(R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR%scCsd|jj|jfS(Ns%s(%r)(R&R R(R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR'scCs|jS(N(R(R((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR(sN(R R R5R4tretcompileR7R>RARJRR8R*RSRRRR%R'R((((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR+Ns,         %      sk^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?(\.(post)(\d+))?(\.(dev)(\d+))?(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$c 
CsK|j}tj|}|s4td|n|j}td|djdD}x0t|dkr|ddkr|d }qfW|dsd}nt|d}|dd!}|d d !}|d d !}|d }|dkrd}n|dt|df}|dkr.d}n|dt|df}|dkr]d}n|dt|df}|dkrd}nfg} xQ|jdD]@} | j rdt| f} n d| f} | j | qWt| }|s| r|rd}qd}n|s&d}n|s5d}n||||||fS(NsNot a valid version: %scss|]}t|VqdS(N(tint(t.0R,((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pys sit.iiiiii i i i tatzt_tfinal(NN((NN((NN(((RYi(RZ(R[(R\( R tPEP440_VERSION_RER8RR9RR=RRVR5tisdigitRC( RRDR9tnumstepochtpretposttdevtlocalRtpart((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt _pep_440_keysT  #%                      cBsAeZdZdZedddddgZedZRS(sIA rational version. Good: 1.2 # equivalent to "1.2.0" 1.2.0 1.2a1 1.2.3a2 1.2.3b1 1.2.3c1 1.2.3.4 TODO: fill this out Bad: 1 # minimum two numbers 1.2a # release level must have a release serial 1.2.3b cCsQt|}tj|}|j}td|djdD|_|S(Ncss|]}t|VqdS(N(RV(RWR,((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pys siRX(t_normalized_keyR]R8R9RR=t_release_clause(RRRRRDR9((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRs   &RYtbR-trcRccstfdjDS(Nc3s(|]}|r|djkVqdS(iN(t PREREL_TAGS(RWtt(R(s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pys s(tanyR(R((Rs?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR)s(R R R RtsetRkR*R)(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRs cCsUt|}t|}||kr(tS|j|s;tSt|}||dkS(NRX(tstrR@t startswithRBR(txtytn((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt _match_prefix"s    cBseZeZidd6dd6dd6dd6dd 6d d 6d d 6dd6ZdZdZdZdZdZ dZ dZ dZ dZ RS(t_match_compatibles~=t _match_ltR1t _match_gtR2t _match_les<=t _match_ges>=t _match_eqs==t_match_arbitrarys===t _match_nes!=cCsx|r"d|ko|jd}n|jd o:|jd}|rn|jjddd}|j|}n||fS(Nt+iii(RRR=R4(RRMRORIt strip_localR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt _adjust_local<scCsj|j|||\}}||kr+tS|j}djg|D]}t|^qA}t|| 
S(NRX(RRBRhtjoinRoRt(RRMRORItrelease_clausetitpfx((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRvJs   (cCsj|j|||\}}||kr+tS|j}djg|D]}t|^qA}t|| S(NRX(RRBRhRRoRt(RRMRORIRRR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRwRs   (cCs%|j|||\}}||kS(N(R(RRMRORI((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRxZscCs%|j|||\}}||kS(N(R(RRMRORI((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRy^scCsC|j|||\}}|s0||k}nt||}|S(N(RRt(RRMRORIRR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRzbs cCst|t|kS(N(Ro(RRMRORI((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR{jscCsD|j|||\}}|s0||k}nt|| }|S(N(RRt(RRMRORIRR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR|ms cCs|j|||\}}||kr+tS||kr;tS|j}t|dkrc|d }ndjg|D]}t|^qp}t||S(NiiRX(RR@RBRhRRRoRt(RRMRORIRRR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRuus    ((R R RR4RJRRvRwRxRyRzR{R|Ru(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR-s&         s[.+-]$R0s^[.](\d)s0.\1s^[.-]s ^\((.*)\)$s\1s^v(ersion)?\s*(\d+)s\2s^r(ev)?\s*(\d+)s[.]{2,}RXs\b(alfa|apha)\btalphas\b(pre-alpha|prealpha)\bs pre.alphas \(beta\)$tbetas ^[:~._+-]+s [,*")([\]]s[~:+_ -]s\.$s (\d+(\.\d+)*)c Cs|jj}x&tD]\}}|j||}qW|sJd}ntj|}|snd}|}n|jdjd}g|D]}t|^q}x#t |dkr|j dqWt |dkr||j }nDdj g|dD]}t |^q||j }|d }dj g|D]}t |^qB}|j}|rx)tD]\}}|j||}qvWn|s|}n&d|krdnd}|||}t|sd}n|S( s Try to suggest a semantic form for a version for which _suggest_normalized_version couldn't come up with anything. 
s0.0.0iRXiRct-R}N(R R;t _REPLACEMENTStsubt_NUMERIC_PREFIXR8R9R=RVRRCtendRRot_SUFFIX_REPLACEMENTSt is_semverR5( RRRtpattreplRDRItsuffixRtsep((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt_suggest_semantic_versions:  : (    cCs yt||SWntk r%nX|j}xSd2d3d4d5d6d7d8d9d:d;d<d=d>d?d@fD]\}}|j||}qfWtjdd|}tjdd|}tjdd|}tjdd|}tjdd|}|jdr |d }ntjd!d|}tjd"d#|}tjd$d%|}tjd&d|}tjd'd(|}tjd)d(|}tjd*d |}tjd+d,|}tjd-d%|}tjd.d/|}tjd0d1|}yt|Wntk rdA}nX|S(BsSuggest a normalized version close to the given version string. If you have a version string that isn't rational (i.e. NormalizedVersion doesn't like it) then you might be able to get an equivalent (or close) rational version from this function. This does a number of simple normalizations to the given string, based on observation of versions currently in use on PyPI. Given a dump of those version during PyCon 2009, 4287 of them: - 2312 (53.93%) match NormalizedVersion without change with the automatic suggestion - 3474 (81.04%) match when using this suggestion method @param s {str} An irrational version string. @returns A rational version string, or None, if couldn't determine one. 
s-alphaRYs-betaRiRRRjR-s-finalR0s-pres-releases.releases-stableR}RXR[t s.finalR\spre$tpre0sdev$tdev0s([abc]|rc)[\-\.](\d+)$s\1\2s[\-\.](dev)[\-\.]?r?(\d+)$s.\1\2s[.~]?([abc])\.?s\1R,is\b0+(\d+)(?!\d)s (\d+[abc])$s\g<1>0s\.?(dev-r|dev\.r)\.?(\d+)$s.dev\2s-(a|b|c)(\d+)$s[\.\-](dev|devel)$s.dev0s(?![\.\-])dev$s(final|stable)$s\.?(r|-|-r)\.?(\d+)$s.post\2s\.?(dev|git|bzr)\.?(\d+)$s\.?(pre|preview|-c)(\d+)$sc\g<2>sp(\d+)$s.post\1(s-alphaRY(s-betaRi(RRY(RRi(RjR-(s-finalR0(s-preR-(s-releaseR0(s.releaseR0(s-stableR0(R}RX(R[RX(RR0(s.finalR0(R\R0N(RgRR;treplaceRTRRpR5(RtrstorigR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt_suggest_normalized_versionsH           s([a-z]+|\d+|[\.-])R-Ratpreviewsfinal-RRjt@RccCsd}g}x||D]}|jdr|dkrgx'|rc|ddkrc|jq@Wnx'|r|ddkr|jqjWn|j|qWt|S(NcSsg}xtj|jD]j}tj||}|rd|d koUdknrl|jd}n d|}|j|qqW|jd|S(Nt0it9it*s*final(t _VERSION_PARTR=R;t_VERSION_REPLACERKtzfillRC(RRRR.((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt get_partsIs   Rs*finalis*final-t00000000(RptpopRCR(RRRRR.((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyt _legacy_keyHs  cBs eZdZedZRS(cCs t|S(N(R(RR((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRcscCsRt}xE|jD]:}t|tr|jdr|dkrt}PqqW|S(NRs*final(RBRRRRpR@(RRRRq((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyR)fs (R R RR*R)(((s?/usr/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyRbs cBs?eZeZeejZded s~   1k =$ W  . r       #    PK.e[&QEEdistlib/__init__.pynu[# -*- coding: utf-8 -*- # # Copyright (C) 2012-2016 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. 
# import logging __version__ = '0.2.4' class DistlibException(Exception): pass try: from logging import NullHandler except ImportError: # pragma: no cover class NullHandler(logging.Handler): def handle(self, record): pass def emit(self, record): pass def createLock(self): self.lock = None logger = logging.getLogger(__name__) logger.addHandler(NullHandler()) PK.e[i`胟pkg_resources/__init__.pycnu[ abcF@@s dZddlmZddlZddlZddlZddlZddlZddlZddl Z ddl Z ddl Z ddl Z ddl Z ddlZddlZddlZddlZddlZddlZddlZddlZddlZddlmZyddlZWnek rEddlZnXddlmZddlmZm Z m!Z!ddlm"Z"y&ddlm#Z#m$Z$m%Z%e&Z'Wnek re(Z'nXdd lm)Z*dd l+m,Z,m-Z-yddl.j/Z0e0j1Wnek re2Z0nXdd lm3Z3dd lm4Z4e5d e5de5de5dddfej6koddfknrdZ7e j8e7ne2Z9e2Z:de;fdYZ<de=fdYZ>de>e4j?j@fdYZAde>e4j?jBfdYZCdZDiZEdZFdZGdZHdZId ZJd!ZKd"ZLd#ZMZNd$ZOd%d&d'd(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDddEdFdGdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcdddedfdgdhdigFZPd@eQfdjYZRdAeRfdkYZSdleSfdmYZTdBeRfdnYZUdCeRfdoYZViZWej?d ZXdZYdpZZdqZ[dZ\drZ]dsZ^dtZ_gduZ`dvZadwZbejcdxZdejcdyZeebZfdzZgd{ZhehZid|Zjd}Zke2d~ZldZmdXfdYZndYenfdYZod;e=fdYZpdeqfdYZrd:e=fdYZsesZtdDeufdYZvd<fdYZwdZxdZydZzdZ{dZ|dZ}e2dZ~d_fdYZe^e=ed`efdYZdaefdYZejd]efdYZeZdeqfdYZdefdYZde jfdYZdbefdYZe^e jedZefdYZd[efdYZd\efdYZeFddidZe(dZe(dZee jee(dZee=edZe(dZeejeee0dr(ee0jeneFddieFddidZdZdZdZe2dZdZeejeee jeee0dree0jendZee=edZidZdZdZdZejcdjZejcdejejBjZd?e=fdYZdZdZd=e=fdYZdefdYZdefdYZied6ed6ed6ZdZdefdYZdZd>e4jjfdYZdZdZdZdZdZdZe jdde<de&dZeedZedZdS(sZ Package resource API -------------------- A resource is a logical file contained within a package, or a logical subdirectory thereof. The package resource API expects resource names to have their path parts separated with ``/``, *not* whatever the local path separator is. Do not use os.path operations to manipulate resource names being passed into the API. The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. 
It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. i(tabsolute_importN(t get_importer(tsix(turllibtmaptfilter(tutime(tmkdirtrenametunlink(topen(tisdirtsplit(tappdirs(t packagingspip._vendor.packaging.versions pip._vendor.packaging.specifierss"pip._vendor.packaging.requirementsspip._vendor.packaging.markersisLSupport for Python 3.0-3.2 has been dropped. Future versions will fail here.t PEP440WarningcB@seZdZRS(sa Used when there is an issue with a version or specifier not complying with PEP 440. (t__name__t __module__t__doc__(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR[st_SetuptoolsVersionMixincB@sYeZdZdZdZdZdZdZdZdZ dZ RS( cC@stt|jS(N(tsuperRt__hash__(tself((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRcscC@s9t|trt||kStt|j|SdS(N(t isinstancettupleRRt__lt__(Rtother((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRfscC@s9t|trt||kStt|j|SdS(N(RRRRt__le__(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRlscC@s9t|trt||kStt|j|SdS(N(RRRRt__eq__(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRrscC@s9t|trt||kStt|j|SdS(N(RRRRt__ge__(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRxscC@s9t|trt||kStt|j|SdS(N(RRRRt__gt__(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR~scC@s9t|trt||kStt|j|SdS(N(RRRRt__ne__(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@st||S(N(R(Rtkey((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt __getitem__sc#@stjdtjidd6dd6dd6dd6dd 6jfd fd }tjd td dx|t|D] }|VqWdS(Ns(\d+ | [a-z]+ | \.| -)tctpretpreviewsfinal-t-trct@tdevc3@suxij|D]X}||}| s|dkr>qn|d dkr_|jdVqd|VqWdVdS(Nt.it 0123456789it*s*final(R tzfill(tstpart(t component_retreplace(sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_parse_version_partss 
c@sg}x|jD]}|jdr|dkrdx'|r`|ddkr`|jq=Wnx'|r|ddkr|jqgWn|j|qWt|S(NR+s*finalis*final-t00000000(tlowert startswithtpoptappendR(R-tpartsR.(R1(sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytold_parse_versions sYou have iterated over the result of pkg_resources.parse_version. This is a legacy behavior which is inconsistent with the new version class introduced in setuptools 8.0. In most cases, conversion to a tuple is unnecessary. For comparison of versions, sort the Version instances directly. If you have another use case requiring the tuple, please file a bug with the setuptools project describing that need.t stackleveli(tretcompiletVERBOSEtgettwarningstwarntRuntimeWarningtstr(RR8R.((R1R/R0sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt__iter__s ( RRRRRRRRRR!RB(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRbs        tSetuptoolsVersioncB@seZRS((RR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRCstSetuptoolsLegacyVersioncB@seZRS((RR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRDscC@s3yt|SWntjjk r.t|SXdS(N(RCRtversiontInvalidVersionRD(tv((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt parse_versionscK@s-tj|tjtj||dS(N(tglobalstupdatet _state_varstdicttfromkeys(tvartypetkw((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_declare_statescC@sLi}t}x6tjD](\}}|d|||||tcC@s{t}tj|}|dk rwtjdkrwy-ddjtd |jdf}Wqwt k rsqwXn|S(sZReturn this platform's maximum compatible version. distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. 
If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. tdarwins macosx-%s-%sR)iiN( tget_build_platformtmacosVersionStringtmatchRatsystplatformtjoint _macosx_verstgroupt ValueError(tplattm((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytget_supported_platforms - trequiret run_scriptt get_providertget_distributiontload_entry_pointt get_entry_maptget_entry_infotiter_entry_pointstresource_stringtresource_streamtresource_filenametresource_listdirtresource_existstresource_isdirtdeclare_namespacet working_settadd_activation_listenertfind_distributionstset_extraction_pathtcleanup_resourcestget_default_cachet Environmentt WorkingSettResourceManagert Distributiont Requirementt EntryPointtResolutionErrortVersionConflicttDistributionNotFoundt UnknownExtratExtractionErrortparse_requirementsRHt safe_namet safe_versiont get_platformtcompatible_platformst yield_linestsplit_sectionst safe_extrat to_filenametinvalid_markertevaluate_markertensure_directorytnormalize_pathtEGG_DISTt BINARY_DISTt SOURCE_DISTt CHECKOUT_DISTt DEVELOP_DISTtIMetadataProvidertIResourceProvidert FileMetadatat PathMetadatat EggMetadatat EmptyProvidertempty_providert NullProvidert EggProvidertDefaultProvidert ZipProvidertregister_findertregister_namespace_handlertregister_loader_typetfixup_namespace_packagesRtrun_maintAvailableDistributionscB@seZdZdZRS(s.Abstract base for dependency resolution errorscC@s|jjt|jS(N(t __class__RtreprRb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt__repr__Is(RRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRFscB@sDeZdZdZedZedZdZdZRS(s An already-installed version conflicts with the requested version. Should be initialized with the installed Distribution and the requested Requirement. 
s3{self.dist} is installed but {self.req} is requiredcC@s |jdS(Ni(Rb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytdistWscC@s |jdS(Ni(Rb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytreq[scC@s|jjtS(N(t _templatetformattlocals(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytreport_scC@s$|s |S|j|f}t|S(st If required_by is non-empty, return a version of self that is a ContextualVersionConflict. (RbtContextualVersionConflict(Rt required_byRb((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt with_contextbs( RRRRtpropertyRRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRMs  RcB@s*eZdZejdZedZRS(s A VersionConflict that accepts a third parameter, the set of the requirements that required the installed Distribution. s by {self.required_by}cC@s |jdS(Ni(Rb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRus(RRRRRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRms cB@sSeZdZdZedZedZedZdZdZ RS(s&A requested distribution was not foundsSThe '{self.req}' distribution was not found and is required by {self.requirers_str}cC@s |jdS(Ni(Rb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s |jdS(Ni(Rb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt requirersscC@s|js dSdj|jS(Nsthe applications, (RRk(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt requirers_strs cC@s|jjtS(N(RRR(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s |jS(N(R(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt__str__s( RRRRRRRRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRzs cB@seZdZRS(s>Distribution doesn't have an "extra feature" of the given 
name(RRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRsiiicC@s|t|scB@s>eZdZdZdZdZdZdZRS(cC@sdS(s;Does the package's distribution contain the named metadata?N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt has_metadataDRdcC@sdS(s'The named metadata resource as a stringN((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt get_metadataGRdcC@sdS(sYield named metadata resource as list of non-blank non-comment lines Leading and trailing whitespace is stripped from each line, and lines with ``#`` as the first non-blank character are omitted.N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytget_metadata_linesJRdcC@sdS(s>Is the named metadata a directory? (like ``os.path.isdir()``)N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytmetadata_isdirPRdcC@sdS(s?List of metadata names in the directory (like ``os.listdir()``)N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytmetadata_listdirSRdcC@sdS(s=Execute the named script in the supplied namespace dictionaryN((Rt namespace((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRsVRd(RRRRRRRRs(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRCs      cB@sDeZdZdZdZdZdZdZdZRS(s3An object that provides access to package resourcescC@sdS(sdReturn a true filesystem path for `resource_name` `manager` must be an ``IResourceManager``N((tmanagert resource_name((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytget_resource_filename]RdcC@sdS(siReturn a readable file-like object for `resource_name` `manager` must be an ``IResourceManager``N((RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytget_resource_streambRdcC@sdS(smReturn a string containing the contents of `resource_name` `manager` must be an 
``IResourceManager``N((RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytget_resource_stringgRdcC@sdS(s,Does the package contain the named resource?N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt has_resourcelRdcC@sdS(s>Is the named resource a directory? (like ``os.path.isdir()``)N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRoRdcC@sdS(s?List of resource names in the directory (like ``os.listdir()``)N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR}rRd( RRRRRRRRR}(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRZs     cB@seZdZddZedZedZdZdZ dZ ddZ dZ d Z deed Zdded Zdded Zd ZedZdZdZdZRS(sDA collection of active distributions on sys.path (or a similar list)cC@s^g|_i|_i|_g|_|dkr<tj}nx|D]}|j|qCWdS(s?Create working set from list of path entries (default=sys.path)N(tentriest entry_keystby_keyt callbacksRaRiRt add_entry(RRtentry((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt__init__ys       cC@se|}yddlm}Wntk r1|SXy|j|Wntk r`|j|SX|S(s1 Prepare the master working set. i(t __requires__(t__main__RRRrRt_build_from_requirements(tclstwsR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt _build_masters   cC@s|g}t|}|j|t}x|D]}|j|q4Wx0tjD]%}||jkrU|j|qUqUW|jtj(|S(sQ Build a working set from a requirement spec. Rewrites sys.path. (RtresolveRtaddRiRRR (Rtreq_specRtreqstdistsRR ((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs    cC@sT|jj|g|jj|x*t|tD]}|j||tq3WdS(sAdd a path item to ``.entries``, finding any distributions on it ``find_distributions(entry, True)`` is used to find distributions corresponding to the path entry, and they are added. `entry` is always appended to ``.entries``, even if it is already present. 
(This is because ``sys.path`` can contain the same value more than once, and the ``.entries`` of the ``sys.path`` WorkingSet should always equal ``sys.path``.) N(R t setdefaultRR6RRRR(RR R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s cC@s|jj|j|kS(s9True if `dist` is the active distribution for its project(R R=R (RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt __contains__scC@sC|jj|j}|dk r?||kr?t||n|S(sFind a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. N(R R=R RaR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cc@sgx`|D]X}|j|}|dkrGx4|jD] }|Vq5Wq||kr||VqqWdS(sYield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order). N(RwRatvalues(RRmRRRtep((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRys    cC@sQtjdj}|d}|j||d<|j|dj||dS(s?Locate distribution for `requires` and run `script_name` scriptiRiN(RiRRR\RrRs(RtrequiresRRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRss    cc@spi}xc|jD]X}||jkr+qnx:|j|D]+}||kr9d||<|j|Vq9q9WqWdS(sYield distributions for non-duplicate projects in the working set The yield order is the order in which the items' path entries were added to the working set. 
iN(RR R (RtseentitemR ((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRBs  cC@s|r"|j|j|d|n|dkr:|j}n|jj|g}|jj|jg}| r|j|jkrdS||j|j<|j|kr|j|jn|j|kr|j|jn|j |dS(sAdd `dist` to working set, associated with `entry` If `entry` is unspecified, it defaults to the ``.location`` of `dist`. On exit from this routine, `entry` is added to the end of the working set's ``.entries`` (if it wasn't already present). `dist` is only added to the working set if it's for a project that doesn't already have a distribution in the set, unless `replace=True`. If it's added, any callbacks registered with the ``subscribe()`` method will be called. R0N( t insert_onRRatlocationR RR R R6t _added_new(RRR tinsertR0tkeystkeys2((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs   cC@s"t|ddd}i}i}g}t}tjt} x|r|jd} | |krmqFn|j| sqFn|j| j} | dkr|j j| j} | dks| | krq|rq|} |dkr| dkrt |j }qt g}t g} n|j| | |} || j<| dkrq| j| d} t| | qqn|j| n| | kr| | }t| | j|n| j| jddd}|j|x/|D]'}| |j| j| j|| Map each requirement to the extras that demanded it. c@s:fd|jddD}j p9t|S(s Evaluate markers for req against each extra that demanded it. Return False if the req has a marker and fails evaluation. Otherwise, return True. c3@s(|]}jji|d6VqdS(textraN(tmarkertevaluate(t.0RM(R(sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pys s(N(N(R=RaRNtany(RRt extra_evals((RsF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR,s (RRRR,(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR(scB@seZdZd eedZdZdZd dZ dZ dZ d dZ d dZ d Zd Zd ZRS( s5Searchable snapshot of distributions on a search pathcC@s,i|_||_||_|j|dS(s!Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. 
`platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an optional string naming the desired version of Python (e.g. ``'3.3'``); it defaults to the current version. You may explicitly set `platform` (and/or `python`) to ``None`` if you wish to map *all* distributions, not just those compatible with the running platform or Python version. N(t_distmapRjtpythontscan(Rt search_pathRjRT((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs   cC@sC|jdks0|jdks0|j|jkoBt|j|jS(sIs distribution `dist` acceptable for this environment? The distribution must match the platform and python version requirements specified when this environment was created, or False is returned. N(RTRat py_versionRRj(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytcan_addscC@s|j|jj|dS(s"Remove `dist` from the environmentN(RSR tremove(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRY(scC@sQ|dkrtj}nx2|D]*}x!t|D]}|j|q2WqWdS(sdScan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added. N(RaRiRRR(RRVR R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRU,s    cC@s|j}|jj|gS(sReturn a newest-to-oldest list of distributions for `project_name` Uses case-insensitive `project_name` comparison, assuming all the project's distributions use their project's name converted to all lowercase as their key. 
(R3RSR=(RR0tdistribution_key((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR!;s cC@su|j|rq|jrq|jj|jg}||krq|j||jdtjddt qqndS(sLAdd `dist` if we ``can_add()`` it and it has not already been added R thashcmptreverseN( RXt has_versionRSRR R6R<toperatort attrgetterR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRFs   cC@sW|j|}|dk r|Sx%||jD]}||kr-|Sq-W|j||S(sFind distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a suitable distribution is already active. (This may raise ``VersionConflict`` if an unsuitable version of the project is already active in the specified `working_set`.) If a suitable distribution isn't active, this method returns the newest distribution in the environment that meets the ``Requirement`` in `req`. If no suitable distribution is found, and `installer` is supplied, then the result of calling the environment's ``obtain(req, installer)`` method will be returned. N(RRaR tobtain(RRRR3R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR-Os   cC@s|dk r||SdS(sObtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. 
This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.N(Ra(Rt requirementR3((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR`es cc@s0x)|jjD]}||r|VqqWdS(s=Yield the unique project names of the available distributionsN(RSR%(RR ((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRBqs cC@s{t|tr|j|nXt|trdxF|D](}x||D]}|j|qFWq5Wntd|f|S(s2In-place addition of a distribution or environmentsCan't add %r to environment(RRRRR(RRtprojectR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt__iadd__ws cC@s@|jgdddd}x||fD]}||7}q(W|S(s4Add an environment or distribution to an environmentRjRTN(RRa(RRtnewR2((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt__add__sN(RRRRaRqtPY_MAJORRRXRYRUR!RR-R`RBRcRe(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs       cB@seZdZRS(sTAn error occurred extracting a resource The following attributes are available from instances of this exception: manager The resource manager that raised this exception cache_path The base directory for resource extraction original_error The exception instance that caused extraction to fail (RRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cB@seZdZdZdZdZdZdZdZ dZ dZ dZ dd Z ed Zd Zd Zed ZRS(s'Manage resource extraction and packagescC@s i|_dS(N(t cached_files(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@st|j|S(sDoes the named resource exist?(RtR(Rtpackage_or_requirementR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR~scC@st|j|S(s,Is the named resource an existing directory?(RtR(RRhR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cC@st|j||S(s4Return a true filesystem path for specified resource(RtR(RRhR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR|s 
cC@st|j||S(s9Return a readable file-like object for specified resource(RtR(RRhR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR{s cC@st|j||S(s%Return specified resource as a string(RtR(RRhR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRzs cC@st|j|S(s1List the contents of the named resource directory(RtR}(RRhR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR}s cC@sttjd}|jpt}tjdj}t|jt }||_ ||_ ||_ |dS(s5Give an error message for problems extracting file(s)is Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: {old_exc} The Python egg cache directory is currently set to: {cache_path} Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. N( Ritexc_infotextraction_pathRttextwraptdedenttlstripRRRRt cache_pathtoriginal_error(Rtold_excRnttmplterr((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytextraction_errors   cC@sn|jpt}tjj||d|}yt|Wn|jnX|j|d|j|<|S(sReturn absolute location in cache for `archive_name` and `names` The parent directory of the resulting path will be created if it does not already exist. `archive_name` should be the base filename of the enclosing egg (which may not be the name of the enclosing zipfile!), including its ".egg" extension. `names`, if provided, should be a sequence of path name parts "under" the egg's extraction location. This method should only be called by resource providers that need to obtain an extraction location, and only for names they intend to extract, as it tracks the generated names for possible cleanup later. 
s-tmpi( RjRRRRkt_bypass_ensure_directoryRst_warn_unsafe_extraction_pathRg(Rt archive_nametnamest extract_patht target_path((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytget_cache_paths   cC@swtjdkr*|jtjd r*dStj|j}|tj@sV|tj@rsd|}tj |t ndS(sN If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details. tnttwindirNs%s is writable by group/others and vulnerable to attack when used with get_resource_filename. Consider a more secure location (set with .set_extraction_path or the PYTHON_EGG_CACHE environment variable).( RRR4tenvirontstattst_modetS_IWOTHtS_IWGRPR>R?t UserWarning(Rtmodetmsg((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRus &cC@s@tjdkr<tj|jdBd@}tj||ndS(s4Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns. tposiximiN(RRR~Rtchmod(RttempnametfilenameR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt postprocessscC@s%|jrtdn||_dS(sSet the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) 
Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.) s5Can't change extraction path, files already extractedN(RgRnRj(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR)s  cC@sdS(sB Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. This function does not have any concurrency protection, so it should generally only be called when the extraction path is a temporary directory exclusive to a single process. This method is not automatically called; you must call it explicitly or register it as an ``atexit`` function if you wish to ensure cleanup of a temporary directory used for extractions. N((Rtforce((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRCRdN((RRRRaRjRR~RR|R{RzR}RsRzt staticmethodRuRRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs           cC@s"tjjdp!tjddS(s Return the ``PYTHON_EGG_CACHE`` environment variable or a platform-relevant user cache dir for an app named "Python-Eggs". tPYTHON_EGG_CACHEtappnames Python-Eggs(RR}R=R tuser_cache_dir(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRQscC@stjdd|S(sConvert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. 
s[^A-Za-z0-9.]+R%(R:tsub(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR]scC@sZyttjj|SWn9tjjk rU|jdd}tjdd|SXdS(sB Convert an arbitrary string to a standard version string RR)s[^A-Za-z0-9.]+R%N(RARREtVersionRFR0R:R(RE((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRes cC@stjdd|jS(sConvert an arbitrary string to a standard 'extra' name Any runs of non-alphanumeric characters are replaced with a single '_', and the result is always lowercased. s[^A-Za-z0-9.-]+R(R:RR3(RM((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRqscC@s|jddS(s|Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. R%R(R0(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRzscC@s;yt|Wn&tk r6}d|_d|_|SXtS(so Validate text as a PEP 508 environment marker; return an exception if invalid or False otherwise. N(Rt SyntaxErrorRaRtlinenoR(ttextte((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs  cC@sLy tjj|}|jSWn%tjjk rG}t|nXdS(s Evaluate a PEP 508 environment marker. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. This implementation uses the 'pyparsing' module. 
N(RtmarkerstMarkerROt InvalidMarkerR(RRMRNR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cB@seZdZdZdZdZdZdZdZ dZ dZ dZ dZ dZd Zd Zd Zd Zd ZdZdZdZdZdZRS(sETry to implement resources and metadata for arbitrary PEP 302 loaderscC@s:t|dd|_tjjt|dd|_dS(NRt__file__Rd(RRaRRRtdirnamet module_path(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s|j|j|S(N(t_fnR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@stj|j||S(N(tiotBytesIOR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s|j|j|j|S(N(t_getRR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s|j|j|j|S(N(t_hasRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s%|jo$|j|j|j|S(N(tegg_infoRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@sE|js dS|j|j|j|}tjrA|jdS|S(NRdsutf-8(RRRRtPY3tdecode(RRtvalue((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cC@st|j|S(N(RR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s|j|j|j|S(N(t_isdirRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s%|jo$|j|j|j|S(N(RRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s|j|j|j|S(N(t_listdirRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR}scC@s)|jr%|j|j|j|SgS(N(RRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs c B@sd|}|j|s,ed|n|j|jdd}|jdd}|j|j|}||dsN( RRRRRRRRaRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR6s    t ZipManifestscB@s#eZdZedZeZRS(s zip manifest builder c@s?t|-fdjD}t|SWdQXdS(s Build a dictionary similar to the zipimport directory caches, except instead of tuples, store ZipInfo objects. Use a platform-specific path separator (os.sep) for the path keys for compatibility with pypy on Windows. 
c3@s3|])}|jdtjj|fVqdS(RN(R0Rtseptgetinfo(RPR(tzfile(sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pys UsN(tContextualZipFiletnamelistRL(RRRR((RsF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytbuildJs  (RRRRLRtload(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyREstMemoizedZipManifestscB@s)eZdZejddZdZRS(s% Memoized zipfile manifests. t manifest_modsmanifest mtimecC@svtjj|}tj|j}||ksC||j|krk|j|}|j||||"os.rename" and "os.unlink" are not supported on this platforms .$extracttdirR{(t_indexRRRRkRRRt WRITE_SUPPORTtIOErrorRzRRt _is_currentt_mkstemptwriteRRRRRRterrortisfileRR Rs( RRRRtlastRRt real_pathtoutfttmpnam((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs@$    c C@s|j|j|\}}tjj|s2tStj|}|j|ks_|j|krctS|j j |}t |d}|j }WdQX||kS(sK Return True if the file_path is current for this zip_path RN( RRRRRRR~tst_sizeRRRR R( Rt file_pathRRRR~t zip_contentstft file_contents((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@sa|jdkrZg}x6dD].}|j|r|j|j|qqW||_n|jS(Nsnative_libs.txtseager_resources.txt(snative_libs.txtseager_resources.txt(RRaRR/R(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs  cC@sy |jSWntk ri}x~|jD]s}|jtj}xX|rtjj|d }||kr||j|dPqF|jg||R?(RR treplacement_charRqR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRLs  cC@st|j|S(N(RR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRTs(RRRRRRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR1s     cB@seZdZdZRS(ssMetadata provider for egg directories Usage:: # Development eggs: egg_info = "/path/to/PackageName.egg-info" base_dir = os.path.dirname(egg_info) metadata = PathMetadata(base_dir, egg_info) dist_name = os.path.splitext(os.path.basename(egg_info))[0] dist = Distribution(basedir, project_name=dist_name, metadata=metadata) # Unpacked egg directories: egg_path = 
"/path/to/PackageName-ver-pyver-etc.egg" metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) dist = Distribution.from_filename(egg_path, metadata=metadata) cC@s||_||_dS(N(RR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRls (RRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRXscB@seZdZdZRS(s Metadata provider for .egg filescC@s`|jtj|_||_|jrFtjj|j|j|_n |j|_|j dS(s-Create a metadata provider from a zipimporterN( RRRRRtprefixRRkRR(Rtimporter((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRts   ! (RRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRqsRLt_distribution_finderscC@s|t|>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' >>> _by_version_descending(names) ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar'] >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' >>> _by_version_descending(names) ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' >>> _by_version_descending(names) ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] cS@sYtjj|\}}tj|jd|g}g|D]}tjj|^q=S(s6 Parse each component of the filename R%( RRtsplitextt itertoolstchainR RRER(RtextR7R.((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt _by_versionsR R\(tsortedR(RwR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_by_version_descendings cc@s9t|}tjj|r5tj|tjr5t|rntj|dt |tjj |dVq5t tj |}x|D]}|j }|jds|jdr=tjj ||}tjj|rttj |dkrqnt ||}n t|}tj|||dtVq| rt|rttjj ||}x|D] } | VqrWq| r|jdrttjj ||} | j} WdQXxa| D]V} | jsqntjj || j} t| }x|D] }|VqWPqWqqWndS( s6Yield distributions accessible on a sys.path directoryR sEGG-INFOs .egg-infos .dist-infoit precedences .egg-linkN(t_normalize_cachedRRR taccesstR_OKRRRRRkR!RR3RRRt from_locationRRR t readlineststriptrstrip(R RRtpath_item_entriesR R3tfullpathR RRt entry_filet entry_linestlineRR 
((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt find_on_pathsB '             t FileFindert_namespace_handlerst_namespace_packagescC@s|t|[^-]+) ( -(?P[^-]+) ( -py(?P[^-]+) ( -(?P.+) )? )? )? cB@seZdZd d ddZdZdZedZdZ dddZ e j dZ eddZed Zedd Zedd ZRS(s3Object representing an advertised importable objectcC@snt|std|n||_||_t||_tjddj|j |_ ||_ dS(NsInvalid module namesx[%s]t,( tMODULERnRt module_nameRtattrsRRRkR.R(RRR[R\R.R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs   "cC@sfd|j|jf}|jr<|ddj|j7}n|jrb|ddj|j7}n|S(Ns%s = %st:R)s [%s]RY(RR[R\RkR.(RR-((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs   cC@sdt|S(NsEntryPoint.parse(%r)(RA(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscO@sO| s|s|r,tjdtddn|rE|j||n|jS(sH Require packages for this EntryPoint, then resolve it. sJParameters to load are deprecated. Call .resolve and .require separately.R9i(R>R?tDeprecationWarningRrR(RRrRbR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cC@sdt|jddgdd}ytjt|j|SWn%tk r_}tt|nXdS(sD Resolve the entry point from its module and attrs. 
tfromlistRtleveliN( RR[t functoolstreduceRR\RRRA(RRtexc((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cC@si|jr%|j r%td|n|jj|j}tj|||}tttj|dS(Ns&Can't require() without a distribution( R.RRRRRR'RR(RR2R3RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRr s s]\s*(?P.+?)\s*=\s*(?P[\w.]+)\s*(:\s*(?P[\w.]+))?\s*(?P\[.*\])?\s*$cC@s|jj|}|s0d}t||n|j}|j|d}|drl|djdnd}||d|d|||S(sParse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1, extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional s9EntryPoint must be in 'name=module:attrs [extras]' formatR.tattrR)RR((tpatternRhRnt groupdictt _parse_extrasR (RtsrcRRpRtresR.R\((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s  #cC@s9|s dStjd|}|jr2tn|jS(Ntx((RRtspecsRnR.(Rt extras_specR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRg$ s   cC@st|std|ni}xZt|D]L}|j||}|j|krptd||jn|||j '{project_name} ({version})' is being parsed as a legacy, non PEP 440, version. You may find odd behavior and sort order. In particular it will be sorted as less than 0.0. It is recommended to migrate to PEP 440 compatible versions. 
s R(RREt LegacyVersionRRRkRlR(R0R>R?RtvarsR(RtLVt is_legacyRq((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_warn_legacy_version s  cC@sgy |jSWnUtk rbt|j|j}|dkr^d}t||j|n|SXdS(Ns(Missing 'Version:' header and/or %s file(R~RR|t _get_metadatatPKG_INFORaRn(RRERq((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRE s   cC@sy |jSWntk rigd6}|_xdD]}xt|j|D]\}}|rd|kr|jdd\}}t|rg}qt|sg}qnt|pd}n|j |gj t |qRWq6W|SXdS(Ns requires.txts depends.txtR]i(s requires.txts depends.txt( t_Distribution__dep_mapRRaRRR RRRRR/R(RtdmRRMRRN((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_dep_map s    "     'cC@s|j}g}|j|jddxS|D]K}y|j|t|Wq/tk rytd||fq/Xq/W|S(s@List of Requirements needed for this distro if `extras` are useds%s has no such extra feature %rN((RR/R=RaRRR(RR.RtdepsR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s   cc@s5|j|r1x|j|D] }|VqWndS(N(RR(RRR.((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR scC@s|dkrtj}n|j|d||tjkrt|jx6|jdD]"}|tjkrWt|qWqWWndS(s>Ensure distribution is importable on `path` (default=sys.path)R0snamespace_packages.txtN( RaRiRR!RR"RRR(RRR0tpkg((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytactivate s   cC@sOdt|jt|j|jp'tf}|jrK|d|j7}n|S(s@Return what this distribution's standard .egg filename should bes %s-%s-py%sR%(RR0RERWRfRj(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s  cC@s(|jrd||jfSt|SdS(Ns%s (%s)(R"RA(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s cC@sMyt|dd}Wntk r/d}nX|p9d}d|j|fS(NREs[unknown version]s%s %s(RRaRnR0(RRE((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR% s    cC@s.|jdrt|nt|j|S(sADelegate all unrecognized public attributes to .metadata providerR(R4RRR(RRd((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt __getattr__- scK@s(|jt|tjj|||S(N(R&R#RRR(RRR 
RO((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR3 scC@sTt|jtjjr1d|j|jf}nd|j|jf}tj|S(s?Return a ``Requirement`` that matches this distribution exactlys%s==%ss%s===%s(RRRRERR0RR(Rtspec((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR=: scC@sD|j||}|dkr:td||ffn|jS(s=Return the `name` entry point of `group` or raise ImportErrorsEntry point %r not foundN(RxRaRR(RRmRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRvC s cC@scy |j}Wn3tk rBtj|jd|}|_nX|dk r_|j|iS|S(s=Return the entry point map for `group`, or the full entry mapsentry_points.txtN(t_ep_mapRRRrRRaR=(RRmtep_map((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRwJ s    cC@s|j|j|S(s<Return the EntryPoint object for `group`+`name`, or ``None``(RwR=(RRmR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRxV sc C@s|p |j}|sdSt|}tjj|}g|D]}|rVt|pY|^q>}xt|D]\}}||kr|rPqdSqo||kro|jtkro| r|||krdS|tjkr|j n|j |||j ||PqoqoW|tjkr.|j n|rG|j d|n |j |dSxMt ry|j ||d} Wntk rPq[X|| =|| =| }q[WdS(sEnsure self.location is on path If replace=False (default): - If location is already in path anywhere, do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent. - Else: add to the end of path. If replace=True: - If location is already on path anywhere (not eggs) or higher priority than its parent (eggs) do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent, removing any lower-priority entries. - Else: add it to the front of path. 
Nii(R"R#RRRt enumerateR"RRitcheck_version_conflictR$R6RR?Rn( RRtlocR0tnloctbdirRItnpathR tnp((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR!Z sB +       cC@s|jdkrdStj|jd}t|j}x|jdD]}|tjksJ||ksJ|tkr}qJn|dkrqJnt tj|dd}|rt|j |sJ|j |jrqJnt d|||jfqJWdS( Nt setuptoolssnamespace_packages.txts top_level.txtt pkg_resourcestsiteRsIModule %s was already imported from %s, but %s is being added to sys.path(RRR( R RLRMRRR"RiRR2RRaR4t issue_warning(RtnspRtmodnametfn((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s"  cC@s8y |jWn&tk r3tdt|tSXtS(NsUnbuilt egg for (RERnRRRR(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR] s   cK@sYd}x0|jD]"}|j|t||dqW|jd|j|j|S(s@Copy this distribution, substituting in any changed keyword argss<project_name version py_version platform location precedenceR N(R RRRaRR(RRORwRd((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytclone s  cC@s g|jD]}|r |^q S(N(R(Rtdep((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR. sN((+RRRRRaRfRRRLR&RRR[RRRRRRRR RRRERRRRRRRRRRR=RvRwRxR!RR]RR.(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRc sN                    C   tEggInfoDistributioncB@seZdZRS(cC@s.t|j|j}|r*||_n|S(s Packages installed by distutils (e.g. numpy or scipy), which uses an old safe_version, and so their version numbers can get mangled when converted to filenames (e.g., 1.11.0.dev0+2329eae to 1.11.0.dev0_2329eae). These distributions will not be parsed properly downstream by Distribution and safe_version, so take an extra step and try to get the version number from the metadata file itself instead of the filename. 
(R|RRR~(Rt md_version((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s  (RRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR stDistInfoDistributioncB@sJeZdZdZejdZedZedZ dZ RS(sGWrap an actual or potential sys.path entry w/metadata, .dist-info styletMETADATAs([\(,])\s*(\d.*?)\s*([,\)])cC@sTy |jSWnBtk rO|j|j}tjjj||_|jSXdS(sParse and cache metadataN(t _pkg_infoRRRtemailtparsertParsertparsestr(RR ((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_parsed_pkg_info s   cC@s6y |jSWn$tk r1|j|_|jSXdS(N(t_DistInfoDistribution__dep_mapRt_compute_dependencies(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s   c@sigd6}|_gx3|jjdp2gD]}jt|q3Wfd}t|d}|dj|xR|jjdpgD]8}t|j}t t|||||R?(RbROR`RT((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s  tRequirementParseErrorcB@seZdZRS(cC@sdj|jS(NR(RkRb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s(RRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR scc@stt|}xm|D]e}d|krA||jd }n|jdrs|d j}|t|7}nt|VqWdS(sYield ``Requirement`` objects for each specification in `strs` `strs` must be a string, or a (possibly-nested) iterable thereof. 
s #s\iN(RxRRRR(RwR(RWRmR.((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR# s  cB@sMeZdZdZdZdZdZdZedZ RS(cC@sytt|j|Wn+tjjk rG}tt|nX|j|_ t |j}||j |_ |_ g|jD]}|j|jf^q|_ttt|j|_|j |jt|j|jrt|jndf|_t|j|_dS(s>DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!N(RRRRR1tInvalidRequirementRRARt unsafe_nameRR3R0R t specifierR^RERkRRRR.RRNRathashCmpRt_Requirement__hash(Rtrequirement_stringRR0R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR7 s + $cC@st|to|j|jkS(N(RRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRK scC@s ||k S(N((RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRQ scC@sGt|tr1|j|jkr%tS|j}n|jj|dtS(Nt prereleases(RRR RRERtcontainsR(RR ((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRT s  cC@s|jS(N(R(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR` scC@sdt|S(NsRequirement.parse(%r)(RA(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRc RdcC@st|\}|S(N(R(R-R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRe s( RRRRRRRRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR6 s     cC@s:t|ts3d|tfdY}|jdS|jS(s&Get an mro for a type or classic classRcB@seZRS((RR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRo si(RRtobjectt__mro__(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_get_mrok s cC@sAx:tt|dt|D]}||kr||SqWdS(s2Return an adapter factory for `ob` from `registry`RN(RRR(tregistryR]tt((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRv s% cC@s8tjj|}tjj|s4tj|ndS(s1Ensure that the parent directory of `path` existsN(RRRR tmakedirs(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR} scC@s^tstdnt|\}}|rZ|rZt| rZt|t|dndS(s/Sandbox-bypassing version of ensure_directory()s*"os.mkdir" not supported on this platform.iN(RRR R 
RtR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRt s  cc@sd}g}xt|D]y}|jdr|jdrs|sI|rW||fVn|dd!j}g}qtd|q|j|qW||fVdS(ssSplit a string or iterable thereof into (section, content) pairs Each ``section`` is a stripped version of the section header ("[section]") and each ``content`` is a list of stripped lines excluding blank lines and comment-only lines. If there are any such lines before the first section header, they're returned in a first ``section`` of ``None``. t[t]iisInvalid section headingN(RaRR4RR(RnR6(R-tsectiontcontentR.((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s  cO@s7tj}ztt_tj||SWd|t_XdS(N(RR tos_openttempfiletmkstemp(RbROtold_open((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s   tignoretcategoryR6cO@s||||S(N((RRbR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt _call_aside s cC@sSt}||dR~RatpkgutilR^RjR)Rt email.parserRRRkRRRJRtimpt pip._vendorRtpip._vendor.six.movesRRRRRRR RRRR Rtos.pathR R timportlib.machineryt machineryRRRaR RRt version_infoRR?RrRR@RRRRERRCRRDRHRKRPRVRXR[R^R_R`t _sget_nonet _sset_noneRqt__all__t ExceptionRRRRRRRfRRRRRRRtRlRRfR;RgRRRRsRRuRvRwRxRRRRLR(RRt RuntimeErrorRRRRRRRRRRRRRRRRRRRRRRRRRRRRR!R/t ImpImporterRR0RR=R:RRRORPRR#RR8RRhRZR<t IGNORECASERRRvR|RRRRRRnRRR1RRRRRtRRtfilterwarningsRRIRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyts                               +b                                  .    z    a   '      .    !        
~ f/   5      PK.e[i`胟pkg_resources/__init__.pyonu[ abcF@@s dZddlmZddlZddlZddlZddlZddlZddlZddl Z ddl Z ddl Z ddl Z ddl Z ddlZddlZddlZddlZddlZddlZddlZddlZddlZddlmZyddlZWnek rEddlZnXddlmZddlmZm Z m!Z!ddlm"Z"y&ddlm#Z#m$Z$m%Z%e&Z'Wnek re(Z'nXdd lm)Z*dd l+m,Z,m-Z-yddl.j/Z0e0j1Wnek re2Z0nXdd lm3Z3dd lm4Z4e5d e5de5de5dddfej6koddfknrdZ7e j8e7ne2Z9e2Z:de;fdYZ<de=fdYZ>de>e4j?j@fdYZAde>e4j?jBfdYZCdZDiZEdZFdZGdZHdZId ZJd!ZKd"ZLd#ZMZNd$ZOd%d&d'd(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDddEdFdGdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcdddedfdgdhdigFZPd@eQfdjYZRdAeRfdkYZSdleSfdmYZTdBeRfdnYZUdCeRfdoYZViZWej?d ZXdZYdpZZdqZ[dZ\drZ]dsZ^dtZ_gduZ`dvZadwZbejcdxZdejcdyZeebZfdzZgd{ZhehZid|Zjd}Zke2d~ZldZmdXfdYZndYenfdYZod;e=fdYZpdeqfdYZrd:e=fdYZsesZtdDeufdYZvd<fdYZwdZxdZydZzdZ{dZ|dZ}e2dZ~d_fdYZe^e=ed`efdYZdaefdYZejd]efdYZeZdeqfdYZdefdYZde jfdYZdbefdYZe^e jedZefdYZd[efdYZd\efdYZeFddidZe(dZe(dZee jee(dZee=edZe(dZeejeee0dr(ee0jeneFddieFddidZdZdZdZe2dZdZeejeee jeee0dree0jendZee=edZidZdZdZdZejcdjZejcdejejBjZd?e=fdYZdZdZd=e=fdYZdefdYZdefdYZied6ed6ed6ZdZdefdYZdZd>e4jjfdYZdZdZdZdZdZdZe jdde<de&dZeedZedZdS(sZ Package resource API -------------------- A resource is a logical file contained within a package, or a logical subdirectory thereof. The package resource API expects resource names to have their path parts separated with ``/``, *not* whatever the local path separator is. Do not use os.path operations to manipulate resource names being passed into the API. The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. 
i(tabsolute_importN(t get_importer(tsix(turllibtmaptfilter(tutime(tmkdirtrenametunlink(topen(tisdirtsplit(tappdirs(t packagingspip._vendor.packaging.versions pip._vendor.packaging.specifierss"pip._vendor.packaging.requirementsspip._vendor.packaging.markersisLSupport for Python 3.0-3.2 has been dropped. Future versions will fail here.t PEP440WarningcB@seZdZRS(sa Used when there is an issue with a version or specifier not complying with PEP 440. (t__name__t __module__t__doc__(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR[st_SetuptoolsVersionMixincB@sYeZdZdZdZdZdZdZdZdZ dZ RS( cC@stt|jS(N(tsuperRt__hash__(tself((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRcscC@s9t|trt||kStt|j|SdS(N(t isinstancettupleRRt__lt__(Rtother((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRfscC@s9t|trt||kStt|j|SdS(N(RRRRt__le__(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRlscC@s9t|trt||kStt|j|SdS(N(RRRRt__eq__(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRrscC@s9t|trt||kStt|j|SdS(N(RRRRt__ge__(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRxscC@s9t|trt||kStt|j|SdS(N(RRRRt__gt__(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR~scC@s9t|trt||kStt|j|SdS(N(RRRRt__ne__(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@st||S(N(R(Rtkey((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt __getitem__sc#@stjdtjidd6dd6dd6dd6dd 6jfd fd }tjd td dx|t|D] }|VqWdS(Ns(\d+ | [a-z]+ | \.| -)tctpretpreviewsfinal-t-trct@tdevc3@suxij|D]X}||}| s|dkr>qn|d dkr_|jdVqd|VqWdVdS(Nt.it 0123456789it*s*final(R tzfill(tstpart(t component_retreplace(sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_parse_version_partss c@sg}x|jD]}|jdr|dkrdx'|r`|ddkr`|jq=Wnx'|r|ddkr|jqgWn|j|qWt|S(NR+s*finalis*final-t00000000(tlowert 
startswithtpoptappendR(R-tpartsR.(R1(sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytold_parse_versions sYou have iterated over the result of pkg_resources.parse_version. This is a legacy behavior which is inconsistent with the new version class introduced in setuptools 8.0. In most cases, conversion to a tuple is unnecessary. For comparison of versions, sort the Version instances directly. If you have another use case requiring the tuple, please file a bug with the setuptools project describing that need.t stackleveli(tretcompiletVERBOSEtgettwarningstwarntRuntimeWarningtstr(RR8R.((R1R/R0sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt__iter__s ( RRRRRRRRRR!RB(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRbs        tSetuptoolsVersioncB@seZRS((RR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRCstSetuptoolsLegacyVersioncB@seZRS((RR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRDscC@s3yt|SWntjjk r.t|SXdS(N(RCRtversiontInvalidVersionRD(tv((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt parse_versionscK@s-tj|tjtj||dS(N(tglobalstupdatet _state_varstdicttfromkeys(tvartypetkw((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_declare_statescC@sLi}t}x6tjD](\}}|d|||||tcC@s{t}tj|}|dk rwtjdkrwy-ddjtd |jdf}Wqwt k rsqwXn|S(sZReturn this platform's maximum compatible version. distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. 
tdarwins macosx-%s-%sR)iiN( tget_build_platformtmacosVersionStringtmatchRatsystplatformtjoint _macosx_verstgroupt ValueError(tplattm((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytget_supported_platforms - trequiret run_scriptt get_providertget_distributiontload_entry_pointt get_entry_maptget_entry_infotiter_entry_pointstresource_stringtresource_streamtresource_filenametresource_listdirtresource_existstresource_isdirtdeclare_namespacet working_settadd_activation_listenertfind_distributionstset_extraction_pathtcleanup_resourcestget_default_cachet Environmentt WorkingSettResourceManagert Distributiont Requirementt EntryPointtResolutionErrortVersionConflicttDistributionNotFoundt UnknownExtratExtractionErrortparse_requirementsRHt safe_namet safe_versiont get_platformtcompatible_platformst yield_linestsplit_sectionst safe_extrat to_filenametinvalid_markertevaluate_markertensure_directorytnormalize_pathtEGG_DISTt BINARY_DISTt SOURCE_DISTt CHECKOUT_DISTt DEVELOP_DISTtIMetadataProvidertIResourceProvidert FileMetadatat PathMetadatat EggMetadatat EmptyProvidertempty_providert NullProvidert EggProvidertDefaultProvidert ZipProvidertregister_findertregister_namespace_handlertregister_loader_typetfixup_namespace_packagesRtrun_maintAvailableDistributionscB@seZdZdZRS(s.Abstract base for dependency resolution errorscC@s|jjt|jS(N(t __class__RtreprRb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt__repr__Is(RRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRFscB@sDeZdZdZedZedZdZdZRS(s An already-installed version conflicts with the requested version. Should be initialized with the installed Distribution and the requested Requirement. 
s3{self.dist} is installed but {self.req} is requiredcC@s |jdS(Ni(Rb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytdistWscC@s |jdS(Ni(Rb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytreq[scC@s|jjtS(N(t _templatetformattlocals(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytreport_scC@s$|s |S|j|f}t|S(st If required_by is non-empty, return a version of self that is a ContextualVersionConflict. (RbtContextualVersionConflict(Rt required_byRb((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt with_contextbs( RRRRtpropertyRRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRMs  RcB@s*eZdZejdZedZRS(s A VersionConflict that accepts a third parameter, the set of the requirements that required the installed Distribution. s by {self.required_by}cC@s |jdS(Ni(Rb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRus(RRRRRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRms cB@sSeZdZdZedZedZedZdZdZ RS(s&A requested distribution was not foundsSThe '{self.req}' distribution was not found and is required by {self.requirers_str}cC@s |jdS(Ni(Rb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s |jdS(Ni(Rb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt requirersscC@s|js dSdj|jS(Nsthe applications, (RRk(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt requirers_strs cC@s|jjtS(N(RRR(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s |jS(N(R(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt__str__s( RRRRRRRRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRzs cB@seZdZRS(s>Distribution doesn't have an "extra feature" of the given 
name(RRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRsiiicC@s|t|scB@s>eZdZdZdZdZdZdZRS(cC@sdS(s;Does the package's distribution contain the named metadata?N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt has_metadataDRdcC@sdS(s'The named metadata resource as a stringN((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt get_metadataGRdcC@sdS(sYield named metadata resource as list of non-blank non-comment lines Leading and trailing whitespace is stripped from each line, and lines with ``#`` as the first non-blank character are omitted.N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytget_metadata_linesJRdcC@sdS(s>Is the named metadata a directory? (like ``os.path.isdir()``)N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytmetadata_isdirPRdcC@sdS(s?List of metadata names in the directory (like ``os.listdir()``)N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytmetadata_listdirSRdcC@sdS(s=Execute the named script in the supplied namespace dictionaryN((Rt namespace((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRsVRd(RRRRRRRRs(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRCs      cB@sDeZdZdZdZdZdZdZdZRS(s3An object that provides access to package resourcescC@sdS(sdReturn a true filesystem path for `resource_name` `manager` must be an ``IResourceManager``N((tmanagert resource_name((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytget_resource_filename]RdcC@sdS(siReturn a readable file-like object for `resource_name` `manager` must be an ``IResourceManager``N((RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytget_resource_streambRdcC@sdS(smReturn a string containing the contents of `resource_name` `manager` must be an 
``IResourceManager``N((RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytget_resource_stringgRdcC@sdS(s,Does the package contain the named resource?N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt has_resourcelRdcC@sdS(s>Is the named resource a directory? (like ``os.path.isdir()``)N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRoRdcC@sdS(s?List of resource names in the directory (like ``os.listdir()``)N((R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR}rRd( RRRRRRRRR}(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRZs     cB@seZdZddZedZedZdZdZ dZ ddZ dZ d Z deed Zdded Zdded Zd ZedZdZdZdZRS(sDA collection of active distributions on sys.path (or a similar list)cC@s^g|_i|_i|_g|_|dkr<tj}nx|D]}|j|qCWdS(s?Create working set from list of path entries (default=sys.path)N(tentriest entry_keystby_keyt callbacksRaRiRt add_entry(RRtentry((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt__init__ys       cC@se|}yddlm}Wntk r1|SXy|j|Wntk r`|j|SX|S(s1 Prepare the master working set. i(t __requires__(t__main__RRRrRt_build_from_requirements(tclstwsR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt _build_masters   cC@s|g}t|}|j|t}x|D]}|j|q4Wx0tjD]%}||jkrU|j|qUqUW|jtj(|S(sQ Build a working set from a requirement spec. Rewrites sys.path. (RtresolveRtaddRiRRR (Rtreq_specRtreqstdistsRR ((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs    cC@sT|jj|g|jj|x*t|tD]}|j||tq3WdS(sAdd a path item to ``.entries``, finding any distributions on it ``find_distributions(entry, True)`` is used to find distributions corresponding to the path entry, and they are added. `entry` is always appended to ``.entries``, even if it is already present. 
(This is because ``sys.path`` can contain the same value more than once, and the ``.entries`` of the ``sys.path`` WorkingSet should always equal ``sys.path``.) N(R t setdefaultRR6RRRR(RR R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s cC@s|jj|j|kS(s9True if `dist` is the active distribution for its project(R R=R (RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt __contains__scC@sC|jj|j}|dk r?||kr?t||n|S(sFind a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. N(R R=R RaR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cc@sgx`|D]X}|j|}|dkrGx4|jD] }|Vq5Wq||kr||VqqWdS(sYield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order). N(RwRatvalues(RRmRRRtep((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRys    cC@sQtjdj}|d}|j||d<|j|dj||dS(s?Locate distribution for `requires` and run `script_name` scriptiRiN(RiRRR\RrRs(RtrequiresRRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRss    cc@spi}xc|jD]X}||jkr+qnx:|j|D]+}||kr9d||<|j|Vq9q9WqWdS(sYield distributions for non-duplicate projects in the working set The yield order is the order in which the items' path entries were added to the working set. 
iN(RR R (RtseentitemR ((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRBs  cC@s|r"|j|j|d|n|dkr:|j}n|jj|g}|jj|jg}| r|j|jkrdS||j|j<|j|kr|j|jn|j|kr|j|jn|j |dS(sAdd `dist` to working set, associated with `entry` If `entry` is unspecified, it defaults to the ``.location`` of `dist`. On exit from this routine, `entry` is added to the end of the working set's ``.entries`` (if it wasn't already present). `dist` is only added to the working set if it's for a project that doesn't already have a distribution in the set, unless `replace=True`. If it's added, any callbacks registered with the ``subscribe()`` method will be called. R0N( t insert_onRRatlocationR RR R R6t _added_new(RRR tinsertR0tkeystkeys2((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs   cC@s"t|ddd}i}i}g}t}tjt} x|r|jd} | |krmqFn|j| sqFn|j| j} | dkr|j j| j} | dks| | krq|rq|} |dkr| dkrt |j }qt g}t g} n|j| | |} || j<| dkrq| j| d} t| | qqn|j| n| | kr| | }t| | j|n| j| jddd}|j|x/|D]'}| |j| j| j|| Map each requirement to the extras that demanded it. c@s:fd|jddD}j p9t|S(s Evaluate markers for req against each extra that demanded it. Return False if the req has a marker and fails evaluation. Otherwise, return True. c3@s(|]}jji|d6VqdS(textraN(tmarkertevaluate(t.0RM(R(sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pys s(N(N(R=RaRNtany(RRt extra_evals((RsF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR,s (RRRR,(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR(scB@seZdZd eedZdZdZd dZ dZ dZ d dZ d dZ d Zd Zd ZRS( s5Searchable snapshot of distributions on a search pathcC@s,i|_||_||_|j|dS(s!Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. 
`platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an optional string naming the desired version of Python (e.g. ``'3.3'``); it defaults to the current version. You may explicitly set `platform` (and/or `python`) to ``None`` if you wish to map *all* distributions, not just those compatible with the running platform or Python version. N(t_distmapRjtpythontscan(Rt search_pathRjRT((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs   cC@sC|jdks0|jdks0|j|jkoBt|j|jS(sIs distribution `dist` acceptable for this environment? The distribution must match the platform and python version requirements specified when this environment was created, or False is returned. N(RTRat py_versionRRj(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytcan_addscC@s|j|jj|dS(s"Remove `dist` from the environmentN(RSR tremove(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRY(scC@sQ|dkrtj}nx2|D]*}x!t|D]}|j|q2WqWdS(sdScan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added. N(RaRiRRR(RRVR R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRU,s    cC@s|j}|jj|gS(sReturn a newest-to-oldest list of distributions for `project_name` Uses case-insensitive `project_name` comparison, assuming all the project's distributions use their project's name converted to all lowercase as their key. 
(R3RSR=(RR0tdistribution_key((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR!;s cC@su|j|rq|jrq|jj|jg}||krq|j||jdtjddt qqndS(sLAdd `dist` if we ``can_add()`` it and it has not already been added R thashcmptreverseN( RXt has_versionRSRR R6R<toperatort attrgetterR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRFs   cC@sW|j|}|dk r|Sx%||jD]}||kr-|Sq-W|j||S(sFind distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a suitable distribution is already active. (This may raise ``VersionConflict`` if an unsuitable version of the project is already active in the specified `working_set`.) If a suitable distribution isn't active, this method returns the newest distribution in the environment that meets the ``Requirement`` in `req`. If no suitable distribution is found, and `installer` is supplied, then the result of calling the environment's ``obtain(req, installer)`` method will be returned. N(RRaR tobtain(RRRR3R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR-Os   cC@s|dk r||SdS(sObtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. 
This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.N(Ra(Rt requirementR3((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR`es cc@s0x)|jjD]}||r|VqqWdS(s=Yield the unique project names of the available distributionsN(RSR%(RR ((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRBqs cC@s{t|tr|j|nXt|trdxF|D](}x||D]}|j|qFWq5Wntd|f|S(s2In-place addition of a distribution or environmentsCan't add %r to environment(RRRRR(RRtprojectR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt__iadd__ws cC@s@|jgdddd}x||fD]}||7}q(W|S(s4Add an environment or distribution to an environmentRjRTN(RRa(RRtnewR2((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt__add__sN(RRRRaRqtPY_MAJORRRXRYRUR!RR-R`RBRcRe(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs       cB@seZdZRS(sTAn error occurred extracting a resource The following attributes are available from instances of this exception: manager The resource manager that raised this exception cache_path The base directory for resource extraction original_error The exception instance that caused extraction to fail (RRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cB@seZdZdZdZdZdZdZdZ dZ dZ dZ dd Z ed Zd Zd Zed ZRS(s'Manage resource extraction and packagescC@s i|_dS(N(t cached_files(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@st|j|S(sDoes the named resource exist?(RtR(Rtpackage_or_requirementR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR~scC@st|j|S(s,Is the named resource an existing directory?(RtR(RRhR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cC@st|j||S(s4Return a true filesystem path for specified resource(RtR(RRhR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR|s 
cC@st|j||S(s9Return a readable file-like object for specified resource(RtR(RRhR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR{s cC@st|j||S(s%Return specified resource as a string(RtR(RRhR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRzs cC@st|j|S(s1List the contents of the named resource directory(RtR}(RRhR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR}s cC@sttjd}|jpt}tjdj}t|jt }||_ ||_ ||_ |dS(s5Give an error message for problems extracting file(s)is Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: {old_exc} The Python egg cache directory is currently set to: {cache_path} Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. N( Ritexc_infotextraction_pathRttextwraptdedenttlstripRRRRt cache_pathtoriginal_error(Rtold_excRnttmplterr((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytextraction_errors   cC@sn|jpt}tjj||d|}yt|Wn|jnX|j|d|j|<|S(sReturn absolute location in cache for `archive_name` and `names` The parent directory of the resulting path will be created if it does not already exist. `archive_name` should be the base filename of the enclosing egg (which may not be the name of the enclosing zipfile!), including its ".egg" extension. `names`, if provided, should be a sequence of path name parts "under" the egg's extraction location. This method should only be called by resource providers that need to obtain an extraction location, and only for names they intend to extract, as it tracks the generated names for possible cleanup later. 
s-tmpi( RjRRRRkt_bypass_ensure_directoryRst_warn_unsafe_extraction_pathRg(Rt archive_nametnamest extract_patht target_path((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytget_cache_paths   cC@swtjdkr*|jtjd r*dStj|j}|tj@sV|tj@rsd|}tj |t ndS(sN If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details. tnttwindirNs%s is writable by group/others and vulnerable to attack when used with get_resource_filename. Consider a more secure location (set with .set_extraction_path or the PYTHON_EGG_CACHE environment variable).( RRR4tenvirontstattst_modetS_IWOTHtS_IWGRPR>R?t UserWarning(Rtmodetmsg((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRus &cC@s@tjdkr<tj|jdBd@}tj||ndS(s4Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns. tposiximiN(RRR~Rtchmod(RttempnametfilenameR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt postprocessscC@s%|jrtdn||_dS(sSet the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) 
Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.) s5Can't change extraction path, files already extractedN(RgRnRj(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR)s  cC@sdS(sB Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. This function does not have any concurrency protection, so it should generally only be called when the extraction path is a temporary directory exclusive to a single process. This method is not automatically called; you must call it explicitly or register it as an ``atexit`` function if you wish to ensure cleanup of a temporary directory used for extractions. N((Rtforce((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRCRdN((RRRRaRjRR~RR|R{RzR}RsRzt staticmethodRuRRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs           cC@s"tjjdp!tjddS(s Return the ``PYTHON_EGG_CACHE`` environment variable or a platform-relevant user cache dir for an app named "Python-Eggs". tPYTHON_EGG_CACHEtappnames Python-Eggs(RR}R=R tuser_cache_dir(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRQscC@stjdd|S(sConvert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. 
s[^A-Za-z0-9.]+R%(R:tsub(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR]scC@sZyttjj|SWn9tjjk rU|jdd}tjdd|SXdS(sB Convert an arbitrary string to a standard version string RR)s[^A-Za-z0-9.]+R%N(RARREtVersionRFR0R:R(RE((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRes cC@stjdd|jS(sConvert an arbitrary string to a standard 'extra' name Any runs of non-alphanumeric characters are replaced with a single '_', and the result is always lowercased. s[^A-Za-z0-9.-]+R(R:RR3(RM((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRqscC@s|jddS(s|Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. R%R(R0(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRzscC@s;yt|Wn&tk r6}d|_d|_|SXtS(so Validate text as a PEP 508 environment marker; return an exception if invalid or False otherwise. N(Rt SyntaxErrorRaRtlinenoR(ttextte((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs  cC@sLy tjj|}|jSWn%tjjk rG}t|nXdS(s Evaluate a PEP 508 environment marker. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. This implementation uses the 'pyparsing' module. 
N(RtmarkerstMarkerROt InvalidMarkerR(RRMRNR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cB@seZdZdZdZdZdZdZdZ dZ dZ dZ dZ dZd Zd Zd Zd Zd ZdZdZdZdZdZRS(sETry to implement resources and metadata for arbitrary PEP 302 loaderscC@s:t|dd|_tjjt|dd|_dS(NRt__file__Rd(RRaRRRtdirnamet module_path(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s|j|j|S(N(t_fnR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@stj|j||S(N(tiotBytesIOR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s|j|j|j|S(N(t_getRR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s|j|j|j|S(N(t_hasRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s%|jo$|j|j|j|S(N(tegg_infoRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@sE|js dS|j|j|j|}tjrA|jdS|S(NRdsutf-8(RRRRtPY3tdecode(RRtvalue((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cC@st|j|S(N(RR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s|j|j|j|S(N(t_isdirRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s%|jo$|j|j|j|S(N(RRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@s|j|j|j|S(N(t_listdirRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR}scC@s)|jr%|j|j|j|SgS(N(RRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs c B@sd|}|j|s,ed|n|j|jdd}|jdd}|j|j|}||dsN( RRRRRRRRaRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR6s    t ZipManifestscB@s#eZdZedZeZRS(s zip manifest builder c@s?t|-fdjD}t|SWdQXdS(s Build a dictionary similar to the zipimport directory caches, except instead of tuples, store ZipInfo objects. Use a platform-specific path separator (os.sep) for the path keys for compatibility with pypy on Windows. 
c3@s3|])}|jdtjj|fVqdS(RN(R0Rtseptgetinfo(RPR(tzfile(sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pys UsN(tContextualZipFiletnamelistRL(RRRR((RsF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytbuildJs  (RRRRLRtload(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyREstMemoizedZipManifestscB@s)eZdZejddZdZRS(s% Memoized zipfile manifests. t manifest_modsmanifest mtimecC@svtjj|}tj|j}||ksC||j|krk|j|}|j||||"os.rename" and "os.unlink" are not supported on this platforms .$extracttdirR{(t_indexRRRRkRRRt WRITE_SUPPORTtIOErrorRzRRt _is_currentt_mkstemptwriteRRRRRRterrortisfileRR Rs( RRRRtlastRRt real_pathtoutfttmpnam((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs@$    c C@s|j|j|\}}tjj|s2tStj|}|j|ks_|j|krctS|j j |}t |d}|j }WdQX||kS(sK Return True if the file_path is current for this zip_path RN( RRRRRRR~tst_sizeRRRR R( Rt file_pathRRRR~t zip_contentstft file_contents((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscC@sa|jdkrZg}x6dD].}|j|r|j|j|qqW||_n|jS(Nsnative_libs.txtseager_resources.txt(snative_libs.txtseager_resources.txt(RRaRR/R(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs  cC@sy |jSWntk ri}x~|jD]s}|jtj}xX|rtjj|d }||kr||j|dPqF|jg||R?(RR treplacement_charRqR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRLs  cC@st|j|S(N(RR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRTs(RRRRRRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR1s     cB@seZdZdZRS(ssMetadata provider for egg directories Usage:: # Development eggs: egg_info = "/path/to/PackageName.egg-info" base_dir = os.path.dirname(egg_info) metadata = PathMetadata(base_dir, egg_info) dist_name = os.path.splitext(os.path.basename(egg_info))[0] dist = Distribution(basedir, project_name=dist_name, metadata=metadata) # Unpacked egg directories: egg_path = 
"/path/to/PackageName-ver-pyver-etc.egg" metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) dist = Distribution.from_filename(egg_path, metadata=metadata) cC@s||_||_dS(N(RR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRls (RRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRXscB@seZdZdZRS(s Metadata provider for .egg filescC@s`|jtj|_||_|jrFtjj|j|j|_n |j|_|j dS(s-Create a metadata provider from a zipimporterN( RRRRRtprefixRRkRR(Rtimporter((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRts   ! (RRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRqsRLt_distribution_finderscC@s|t|>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' >>> _by_version_descending(names) ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar'] >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' >>> _by_version_descending(names) ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' >>> _by_version_descending(names) ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] cS@sYtjj|\}}tj|jd|g}g|D]}tjj|^q=S(s6 Parse each component of the filename R%( RRtsplitextt itertoolstchainR RRER(RtextR7R.((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt _by_versionsR R\(tsortedR(RwR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_by_version_descendings cc@s9t|}tjj|r5tj|tjr5t|rntj|dt |tjj |dVq5t tj |}x|D]}|j }|jds|jdr=tjj ||}tjj|rttj |dkrqnt ||}n t|}tj|||dtVq| rt|rttjj ||}x|D] } | VqrWq| r|jdrttjj ||} | j} WdQXxa| D]V} | jsqntjj || j} t| }x|D] }|VqWPqWqqWndS( s6Yield distributions accessible on a sys.path directoryR sEGG-INFOs .egg-infos .dist-infoit precedences .egg-linkN(t_normalize_cachedRRR taccesstR_OKRRRRRkR!RR3RRRt from_locationRRR t readlineststriptrstrip(R RRtpath_item_entriesR R3tfullpathR RRt entry_filet entry_linestlineRR 
((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt find_on_pathsB '             t FileFindert_namespace_handlerst_namespace_packagescC@s|t|[^-]+) ( -(?P[^-]+) ( -py(?P[^-]+) ( -(?P.+) )? )? )? cB@seZdZd d ddZdZdZedZdZ dddZ e j dZ eddZed Zedd Zedd ZRS(s3Object representing an advertised importable objectcC@snt|std|n||_||_t||_tjddj|j |_ ||_ dS(NsInvalid module namesx[%s]t,( tMODULERnRt module_nameRtattrsRRRkR.R(RRR[R\R.R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs   "cC@sfd|j|jf}|jr<|ddj|j7}n|jrb|ddj|j7}n|S(Ns%s = %st:R)s [%s]RY(RR[R\RkR.(RR-((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs   cC@sdt|S(NsEntryPoint.parse(%r)(RA(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRscO@sO| s|s|r,tjdtddn|rE|j||n|jS(sH Require packages for this EntryPoint, then resolve it. sJParameters to load are deprecated. Call .resolve and .require separately.R9i(R>R?tDeprecationWarningRrR(RRrRbR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cC@sdt|jddgdd}ytjt|j|SWn%tk r_}tt|nXdS(sD Resolve the entry point from its module and attrs. 
tfromlistRtleveliN( RR[t functoolstreduceRR\RRRA(RRtexc((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRs cC@si|jr%|j r%td|n|jj|j}tj|||}tttj|dS(Ns&Can't require() without a distribution( R.RRRRRR'RR(RR2R3RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRr s s]\s*(?P.+?)\s*=\s*(?P[\w.]+)\s*(:\s*(?P[\w.]+))?\s*(?P\[.*\])?\s*$cC@s|jj|}|s0d}t||n|j}|j|d}|drl|djdnd}||d|d|||S(sParse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1, extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional s9EntryPoint must be in 'name=module:attrs [extras]' formatR.tattrR)RR((tpatternRhRnt groupdictt _parse_extrasR (RtsrcRRpRtresR.R\((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s  #cC@s9|s dStjd|}|jr2tn|jS(Ntx((RRtspecsRnR.(Rt extras_specR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRg$ s   cC@st|std|ni}xZt|D]L}|j||}|j|krptd||jn|||j '{project_name} ({version})' is being parsed as a legacy, non PEP 440, version. You may find odd behavior and sort order. In particular it will be sorted as less than 0.0. It is recommended to migrate to PEP 440 compatible versions. 
s R(RREt LegacyVersionRRRkRlR(R0R>R?RtvarsR(RtLVt is_legacyRq((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_warn_legacy_version s  cC@sgy |jSWnUtk rbt|j|j}|dkr^d}t||j|n|SXdS(Ns(Missing 'Version:' header and/or %s file(R~RR|t _get_metadatatPKG_INFORaRn(RRERq((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRE s   cC@sy |jSWntk rigd6}|_xdD]}xt|j|D]\}}|rd|kr|jdd\}}t|rg}qt|sg}qnt|pd}n|j |gj t |qRWq6W|SXdS(Ns requires.txts depends.txtR]i(s requires.txts depends.txt( t_Distribution__dep_mapRRaRRR RRRRR/R(RtdmRRMRRN((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_dep_map s    "     'cC@s|j}g}|j|jddxS|D]K}y|j|t|Wq/tk rytd||fq/Xq/W|S(s@List of Requirements needed for this distro if `extras` are useds%s has no such extra feature %rN((RR/R=RaRRR(RR.RtdepsR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s   cc@s5|j|r1x|j|D] }|VqWndS(N(RR(RRR.((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR scC@s|dkrtj}n|j|d||tjkrt|jx6|jdD]"}|tjkrWt|qWqWWndS(s>Ensure distribution is importable on `path` (default=sys.path)R0snamespace_packages.txtN( RaRiRR!RR"RRR(RRR0tpkg((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytactivate s   cC@sOdt|jt|j|jp'tf}|jrK|d|j7}n|S(s@Return what this distribution's standard .egg filename should bes %s-%s-py%sR%(RR0RERWRfRj(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s  cC@s(|jrd||jfSt|SdS(Ns%s (%s)(R"RA(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s cC@sMyt|dd}Wntk r/d}nX|p9d}d|j|fS(NREs[unknown version]s%s %s(RRaRnR0(RRE((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR% s    cC@s.|jdrt|nt|j|S(sADelegate all unrecognized public attributes to .metadata providerR(R4RRR(RRd((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt __getattr__- scK@s(|jt|tjj|||S(N(R&R#RRR(RRR 
RO((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR3 scC@sTt|jtjjr1d|j|jf}nd|j|jf}tj|S(s?Return a ``Requirement`` that matches this distribution exactlys%s==%ss%s===%s(RRRRERR0RR(Rtspec((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR=: scC@sD|j||}|dkr:td||ffn|jS(s=Return the `name` entry point of `group` or raise ImportErrorsEntry point %r not foundN(RxRaRR(RRmRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRvC s cC@scy |j}Wn3tk rBtj|jd|}|_nX|dk r_|j|iS|S(s=Return the entry point map for `group`, or the full entry mapsentry_points.txtN(t_ep_mapRRRrRRaR=(RRmtep_map((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRwJ s    cC@s|j|j|S(s<Return the EntryPoint object for `group`+`name`, or ``None``(RwR=(RRmR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRxV sc C@s|p |j}|sdSt|}tjj|}g|D]}|rVt|pY|^q>}xt|D]\}}||kr|rPqdSqo||kro|jtkro| r|||krdS|tjkr|j n|j |||j ||PqoqoW|tjkr.|j n|rG|j d|n |j |dSxMt ry|j ||d} Wntk rPq[X|| =|| =| }q[WdS(sEnsure self.location is on path If replace=False (default): - If location is already in path anywhere, do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent. - Else: add to the end of path. If replace=True: - If location is already on path anywhere (not eggs) or higher priority than its parent (eggs) do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent, removing any lower-priority entries. - Else: add it to the front of path. 
Nii(R"R#RRRt enumerateR"RRitcheck_version_conflictR$R6RR?Rn( RRtlocR0tnloctbdirRItnpathR tnp((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR!Z sB +       cC@s|jdkrdStj|jd}t|j}x|jdD]}|tjksJ||ksJ|tkr}qJn|dkrqJnt tj|dd}|rt|j |sJ|j |jrqJnt d|||jfqJWdS( Nt setuptoolssnamespace_packages.txts top_level.txtt pkg_resourcestsiteRsIModule %s was already imported from %s, but %s is being added to sys.path(RRR( R RLRMRRR"RiRR2RRaR4t issue_warning(RtnspRtmodnametfn((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s"  cC@s8y |jWn&tk r3tdt|tSXtS(NsUnbuilt egg for (RERnRRRR(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR] s   cK@sYd}x0|jD]"}|j|t||dqW|jd|j|j|S(s@Copy this distribution, substituting in any changed keyword argss<project_name version py_version platform location precedenceR N(R RRRaRR(RRORwRd((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pytclone s  cC@s g|jD]}|r |^q S(N(R(Rtdep((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR. sN((+RRRRRaRfRRRLR&RRR[RRRRRRRR RRRERRRRRRRRRRR=RvRwRxR!RR]RR.(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRc sN                    C   tEggInfoDistributioncB@seZdZRS(cC@s.t|j|j}|r*||_n|S(s Packages installed by distutils (e.g. numpy or scipy), which uses an old safe_version, and so their version numbers can get mangled when converted to filenames (e.g., 1.11.0.dev0+2329eae to 1.11.0.dev0_2329eae). These distributions will not be parsed properly downstream by Distribution and safe_version, so take an extra step and try to get the version number from the metadata file itself instead of the filename. 
(R|RRR~(Rt md_version((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s  (RRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR stDistInfoDistributioncB@sJeZdZdZejdZedZedZ dZ RS(sGWrap an actual or potential sys.path entry w/metadata, .dist-info styletMETADATAs([\(,])\s*(\d.*?)\s*([,\)])cC@sTy |jSWnBtk rO|j|j}tjjj||_|jSXdS(sParse and cache metadataN(t _pkg_infoRRRtemailtparsertParsertparsestr(RR ((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_parsed_pkg_info s   cC@s6y |jSWn$tk r1|j|_|jSXdS(N(t_DistInfoDistribution__dep_mapRt_compute_dependencies(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s   c@sigd6}|_gx3|jjdp2gD]}jt|q3Wfd}t|d}|dj|xR|jjdpgD]8}t|j}t t|||||R?(RbROR`RT((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s  tRequirementParseErrorcB@seZdZRS(cC@sdj|jS(NR(RkRb(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s(RRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR scc@stt|}xm|D]e}d|krA||jd }n|jdrs|d j}|t|7}nt|VqWdS(sYield ``Requirement`` objects for each specification in `strs` `strs` must be a string, or a (possibly-nested) iterable thereof. 
s #s\iN(RxRRRR(RwR(RWRmR.((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR# s  cB@sMeZdZdZdZdZdZdZedZ RS(cC@sytt|j|Wn+tjjk rG}tt|nX|j|_ t |j}||j |_ |_ g|jD]}|j|jf^q|_ttt|j|_|j |jt|j|jrt|jndf|_t|j|_dS(s>DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!N(RRRRR1tInvalidRequirementRRARt unsafe_nameRR3R0R t specifierR^RERkRRRR.RRNRathashCmpRt_Requirement__hash(Rtrequirement_stringRR0R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR7 s + $cC@st|to|j|jkS(N(RRR(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRK scC@s ||k S(N((RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRQ scC@sGt|tr1|j|jkr%tS|j}n|jj|dtS(Nt prereleases(RRR RRERtcontainsR(RR ((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRT s  cC@s|jS(N(R(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR` scC@sdt|S(NsRequirement.parse(%r)(RA(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRc RdcC@st|\}|S(N(R(R-R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRe s( RRRRRRRRRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR6 s     cC@s:t|ts3d|tfdY}|jdS|jS(s&Get an mro for a type or classic classRcB@seZRS((RR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRo si(RRtobjectt__mro__(R((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt_get_mrok s cC@sAx:tt|dt|D]}||kr||SqWdS(s2Return an adapter factory for `ob` from `registry`RN(RRR(tregistryR]tt((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRv s% cC@s8tjj|}tjj|s4tj|ndS(s1Ensure that the parent directory of `path` existsN(RRRR tmakedirs(RR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR} scC@s^tstdnt|\}}|rZ|rZt| rZt|t|dndS(s/Sandbox-bypassing version of ensure_directory()s*"os.mkdir" not supported on this platform.iN(RRR R 
RtR(RRR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyRt s  cc@sd}g}xt|D]y}|jdr|jdrs|sI|rW||fVn|dd!j}g}qtd|q|j|qW||fVdS(ssSplit a string or iterable thereof into (section, content) pairs Each ``section`` is a stripped version of the section header ("[section]") and each ``content`` is a list of stripped lines excluding blank lines and comment-only lines. If there are any such lines before the first section header, they're returned in a first ``section`` of ``None``. t[t]iisInvalid section headingN(RaRR4RR(RnR6(R-tsectiontcontentR.((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s  cO@s7tj}ztt_tj||SWd|t_XdS(N(RR tos_openttempfiletmkstemp(RbROtold_open((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyR s   tignoretcategoryR6cO@s||||S(N((RRbR((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyt _call_aside s cC@sSt}||dR~RatpkgutilR^RjR)Rt email.parserRRRkRRRJRtimpt pip._vendorRtpip._vendor.six.movesRRRRRRR RRRR Rtos.pathR R timportlib.machineryt machineryRRRaR RRt version_infoRR?RrRR@RRRRERRCRRDRHRKRPRVRXR[R^R_R`t _sget_nonet _sset_noneRqt__all__t ExceptionRRRRRRRfRRRRRRRtRlRRfR;RgRRRRsRRuRvRwRxRRRRLR(RRt RuntimeErrorRRRRRRRRRRRRRRRRRRRRRRRRRRRRR!R/t ImpImporterRR0RR=R:RRRORPRR#RR8RRhRZR<t IGNORECASERRRvR|RRRRRRnRRR1RRRRRtRRtfilterwarningsRRIRR(((sF/usr/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyts                               +b                                  .    z    a   '      .    !        ~ f/   5      PK.e[-@&>>pkg_resources/__init__.pynu[# coding: utf-8 """ Package resource API -------------------- A resource is a logical file contained within a package, or a logical subdirectory thereof. The package resource API expects resource names to have their path parts separated with ``/``, *not* whatever the local path separator is. Do not use os.path operations to manipulate resource names being passed into the API. 
The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. """ from __future__ import absolute_import import sys import os import io import time import re import types import zipfile import zipimport import warnings import stat import functools import pkgutil import operator import platform import collections import plistlib import email.parser import tempfile import textwrap import itertools from pkgutil import get_importer try: import _imp except ImportError: # Python 3.2 compatibility import imp as _imp from pip._vendor import six from pip._vendor.six.moves import urllib, map, filter # capture these to bypass sandboxing from os import utime try: from os import mkdir, rename, unlink WRITE_SUPPORT = True except ImportError: # no write support, probably under GAE WRITE_SUPPORT = False from os import open as os_open from os.path import isdir, split try: import importlib.machinery as importlib_machinery # access attribute to force import under delayed import mechanisms. importlib_machinery.__name__ except ImportError: importlib_machinery = None from pip._vendor import appdirs from pip._vendor import packaging __import__('pip._vendor.packaging.version') __import__('pip._vendor.packaging.specifiers') __import__('pip._vendor.packaging.requirements') __import__('pip._vendor.packaging.markers') if (3, 0) < sys.version_info < (3, 3): msg = ( "Support for Python 3.0-3.2 has been dropped. Future versions " "will fail here." ) warnings.warn(msg) # declare some globals that will be defined later to # satisfy the linters. require = None working_set = None class PEP440Warning(RuntimeWarning): """ Used when there is an issue with a version or specifier not complying with PEP 440. 
""" class _SetuptoolsVersionMixin(object): def __hash__(self): return super(_SetuptoolsVersionMixin, self).__hash__() def __lt__(self, other): if isinstance(other, tuple): return tuple(self) < other else: return super(_SetuptoolsVersionMixin, self).__lt__(other) def __le__(self, other): if isinstance(other, tuple): return tuple(self) <= other else: return super(_SetuptoolsVersionMixin, self).__le__(other) def __eq__(self, other): if isinstance(other, tuple): return tuple(self) == other else: return super(_SetuptoolsVersionMixin, self).__eq__(other) def __ge__(self, other): if isinstance(other, tuple): return tuple(self) >= other else: return super(_SetuptoolsVersionMixin, self).__ge__(other) def __gt__(self, other): if isinstance(other, tuple): return tuple(self) > other else: return super(_SetuptoolsVersionMixin, self).__gt__(other) def __ne__(self, other): if isinstance(other, tuple): return tuple(self) != other else: return super(_SetuptoolsVersionMixin, self).__ne__(other) def __getitem__(self, key): return tuple(self)[key] def __iter__(self): component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE) replace = { 'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@', }.get def _parse_version_parts(s): for part in component_re.split(s): part = replace(part, part) if not part or part == '.': continue if part[:1] in '0123456789': # pad for numeric comparison yield part.zfill(8) else: yield '*' + part # ensure that alpha/beta/candidate are before final yield '*final' def old_parse_version(s): parts = [] for part in _parse_version_parts(s.lower()): if part.startswith('*'): # remove '-' before a prerelease tag if part < '*final': while parts and parts[-1] == '*final-': parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1] == '00000000': parts.pop() parts.append(part) return tuple(parts) # Warn for use of this function warnings.warn( "You have iterated over the result of " "pkg_resources.parse_version. 
This is a legacy behavior which is " "inconsistent with the new version class introduced in setuptools " "8.0. In most cases, conversion to a tuple is unnecessary. For " "comparison of versions, sort the Version instances directly. If " "you have another use case requiring the tuple, please file a " "bug with the setuptools project describing that need.", RuntimeWarning, stacklevel=1, ) for part in old_parse_version(str(self)): yield part class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version): pass class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin, packaging.version.LegacyVersion): pass def parse_version(v): try: return SetuptoolsVersion(v) except packaging.version.InvalidVersion: return SetuptoolsLegacyVersion(v) _state_vars = {} def _declare_state(vartype, **kw): globals().update(kw) _state_vars.update(dict.fromkeys(kw, vartype)) def __getstate__(): state = {} g = globals() for k, v in _state_vars.items(): state[k] = g['_sget_' + v](g[k]) return state def __setstate__(state): g = globals() for k, v in state.items(): g['_sset_' + _state_vars[k]](k, g[k], v) return state def _sget_dict(val): return val.copy() def _sset_dict(key, ob, state): ob.clear() ob.update(state) def _sget_object(val): return val.__getstate__() def _sset_object(key, ob, state): ob.__setstate__(state) _sget_none = _sset_none = lambda *args: None def get_supported_platform(): """Return this platform's maximum compatible version. distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. 
    """
    plat = get_build_platform()
    m = macosVersionString.match(plat)
    if m is not None and sys.platform == "darwin":
        try:
            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
        except ValueError:
            # not Mac OS X
            pass
    return plat


__all__ = [
    # Basic resource access and distribution/entry point discovery
    'require', 'run_script', 'get_provider', 'get_distribution',
    'load_entry_point', 'get_entry_map', 'get_entry_info',
    'iter_entry_points',
    'resource_string', 'resource_stream', 'resource_filename',
    'resource_listdir', 'resource_exists', 'resource_isdir',

    # Environmental control
    'declare_namespace', 'working_set', 'add_activation_listener',
    'find_distributions', 'set_extraction_path', 'cleanup_resources',
    'get_default_cache',

    # Primary implementation classes
    'Environment', 'WorkingSet', 'ResourceManager',
    'Distribution', 'Requirement', 'EntryPoint',

    # Exceptions
    'ResolutionError', 'VersionConflict', 'DistributionNotFound',
    'UnknownExtra', 'ExtractionError',

    # Warnings
    'PEP440Warning',

    # Parsing functions and string utilities
    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',

    # filesystem utilities
    'ensure_directory', 'normalize_path',

    # Distribution "precedence" constants
    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',

    # "Provider" interfaces, implementations, and registration/lookup APIs
    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
    'register_finder', 'register_namespace_handler', 'register_loader_type',
    'fixup_namespace_packages', 'get_importer',

    # Deprecated/backward compatibility only
    'run_main', 'AvailableDistributions',
]


class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""

    def __repr__(self):
        return self.__class__.__name__ + repr(self.args)


class VersionConflict(ResolutionError):
    """
    An already-installed version conflicts with the requested version.

    Should be initialized with the installed Distribution and the requested
    Requirement.
    """

    _template = "{self.dist} is installed but {self.req} is required"

    @property
    def dist(self):
        # the installed Distribution (first positional arg)
        return self.args[0]

    @property
    def req(self):
        # the requested Requirement (second positional arg)
        return self.args[1]

    def report(self):
        # Format _template against locals(), which exposes `self` to the
        # {self.dist}/{self.req} placeholders.
        return self._template.format(**locals())

    def with_context(self, required_by):
        """
        If required_by is non-empty, return a version of self that is a
        ContextualVersionConflict.
        """
        if not required_by:
            return self
        args = self.args + (required_by,)
        return ContextualVersionConflict(*args)


class ContextualVersionConflict(VersionConflict):
    """
    A VersionConflict that accepts a third parameter, the set of the
    requirements that required the installed Distribution.
    """

    _template = VersionConflict._template + ' by {self.required_by}'

    @property
    def required_by(self):
        # the set of requirers (third positional arg)
        return self.args[2]


class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""

    _template = ("The '{self.req}' distribution was not found "
                 "and is required by {self.requirers_str}")

    @property
    def req(self):
        # the unmet Requirement (first positional arg)
        return self.args[0]

    @property
    def requirers(self):
        # who required it (second positional arg), possibly None/empty
        return self.args[1]

    @property
    def requirers_str(self):
        if not self.requirers:
            return 'the application'
        return ', '.join(self.requirers)

    def report(self):
        return self._template.format(**locals())

    def __str__(self):
        return self.report()


class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""


# loader type -> provider factory, populated by register_loader_type()
_provider_factories = {}

# 'major.minor' prefix of sys.version for the running interpreter
PY_MAJOR = sys.version[:3]

# Distribution "precedence" constants; higher values are preferred
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1


def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module*
 object, returns an ``IResourceProvider`` for that module.
    """
    _provider_factories[loader_type] = provider_factory


def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    try:
        module = sys.modules[moduleOrReq]
    except KeyError:
        # not imported yet; import it and retry the lookup
        __import__(moduleOrReq)
        module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    return _find_adapter(_provider_factories, loader)(module)


def _macosx_vers(_cache=[]):
    # Return the Mac OS X version as a list of string components, memoized
    # in the (intentionally mutable) default argument.
    if not _cache:
        version = platform.mac_ver()[0]
        # fallback for MacPorts
        if version == '':
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist):
                if hasattr(plistlib, 'readPlist'):
                    plist_content = plistlib.readPlist(plist)
                    if 'ProductVersion' in plist_content:
                        version = plist_content['ProductVersion']

        _cache.append(version.split('.'))

    return _cache[0]


def _macosx_arch(machine):
    # Map uname machine names to the arch names used in macosx platform tags.
    return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)


def get_build_platform():
    """Return this platform's string for platform-specific distributions

    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        # Python 2.7 or >=3.2
        from sysconfig import get_platform
    except ImportError:
        from distutils.util import get_platform

    plat = get_platform()
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
                                        _macosx_arch(machine))
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat


macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform


def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?

    Returns true if either platform is ``None``, or the platforms are equal.

    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided == required:
        # easy case
        return True

    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)

        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6. All packages built after this point will
            # use the new macosx designation.
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                if dversion == 7 and macosversion >= "10.3" or \
                        dversion == 8 and macosversion >= "10.4":
                    return True
            # egg isn't macosx or legacy darwin
            return False

        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
                provMac.group(3) != reqMac.group(3):
            return False

        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False

        return True

    # XXX Linux and other platforms' special cases should go here
    return False


def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # Reset the caller's globals so the script runs as if it were __main__.
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)


# backward compatibility
run_main = run_script


def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    if isinstance(dist, six.string_types):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if not isinstance(dist, Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist


def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    return get_distribution(dist).load_entry_point(group, name)


def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    return get_distribution(dist).get_entry_map(group)


def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    return get_distribution(dist).get_entry_info(group, name)


class IMetadataProvider:
    # Documentation-only interface: methods here describe the contract and
    # are never called on this class itself.

    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""

    def get_metadata(name):
        """The named metadata resource as a string"""

    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines

       Leading and trailing whitespace is stripped from each line, and lines
       with ``#`` as the first non-blank character are omitted."""

    def metadata_isdir(name):
        """Is the named metadata a directory?
 (like ``os.path.isdir()``)"""

    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""

    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""


class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""

    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`

        `manager` must be an ``IResourceManager``"""

    def has_resource(resource_name):
        """Does the package contain the named resource?"""

    def resource_isdir(resource_name):
        """Is the named resource a directory?  (like ``os.path.isdir()``)"""

    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""


class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""

    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        self.entries = []
        self.entry_keys = {}
        self.by_key = {}
        self.callbacks = []

        if entries is None:
            entries = sys.path

        for entry in entries:
            self.add_entry(entry)

    @classmethod
    def _build_master(cls):
        """
        Prepare the master working set.
        """
        ws = cls()
        try:
            from __main__ import __requires__
        except ImportError:
            # The main program does not list any requirements
            return ws

        # ensure the requirements are met
        try:
            ws.require(__requires__)
        except VersionConflict:
            return cls._build_from_requirements(__requires__)

        return ws

    @classmethod
    def _build_from_requirements(cls, req_spec):
        """
        Build a working set from a requirement spec. Rewrites sys.path.
        """
        # try it without defaults already on sys.path
        # by starting with an empty path
        ws = cls([])
        reqs = parse_requirements(req_spec)
        dists = ws.resolve(reqs, Environment())
        for dist in dists:
            ws.add(dist)

        # add any missing entries from sys.path
        for entry in sys.path:
            if entry not in ws.entries:
                ws.add_entry(entry)

        # then copy back to sys.path
        sys.path[:] = ws.entries
        return ws

    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it

        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added.  `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)

    def __contains__(self, dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist

    def find(self, req):
        """Find a distribution matching requirement `req`

        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`.  But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # XXX add more info
            raise VersionConflict(dist, req)
        return dist

    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`

        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]

    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # Reset the caller's globals so the script runs as if it were __main__.
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)

    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set

        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue

            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key] = 1
                    yield self.by_key[key]

    def add(self, dist, entry=None, insert=True, replace=False):
        """Add `dist` to working set, associated with `entry`

        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).

        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry, replace=replace)

        if entry is None:
            entry = dist.location
        keys = self.entry_keys.setdefault(entry, [])
        keys2 = self.entry_keys.setdefault(dist.location, [])
        if not replace and dist.key in self.by_key:
            # ignore hidden distros
            return

        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        self._added_new(dist)

    def resolve(self, requirements, env=None, installer=None,
                replace_conflicting=False):
        """List all distributions needed to (recursively) meet `requirements`

        `requirements` must be a sequence of ``Requirement`` objects.
        `env`, if supplied, should be an ``Environment`` instance.  If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set.  `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.

        Unless `replace_conflicting=True`, raises a VersionConflict exception
        if any requirements are found on the path that have the correct name
        but the wrong version.  Otherwise, if an `installer` is supplied it
        will be invoked to obtain the correct version of the requirement and
        activate it.
        """
        # set up the stack
        requirements = list(requirements)[::-1]
        # set of processed requirements
        processed = {}
        # key -> dist
        best = {}
        to_activate = []

        req_extras = _ReqExtras()

        # Mapping of requirement to set of distributions that required it;
        # useful for reporting info about conflicts.
        required_by = collections.defaultdict(set)

        while requirements:
            # process dependencies breadth-first
            req = requirements.pop(0)
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue

            if not req_extras.markers_pass(req):
                continue

            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None or (dist not in req and replace_conflicting):
                    ws = self
                    if env is None:
                        if dist is None:
                            env = Environment(self.entries)
                        else:
                            # Use an empty environment and workingset to avoid
                            # any further conflicts with the conflicting
                            # distribution
                            env = Environment([])
                            ws = WorkingSet([])
                    dist = best[req.key] = env.best_match(req, ws, installer)
                    if dist is None:
                        requirers = required_by.get(req, None)
                        raise DistributionNotFound(req, requirers)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                dependent_req = required_by[req]
                raise VersionConflict(dist, req).with_context(dependent_req)

            # push the new requirements onto the stack
            new_requirements = dist.requires(req.extras)[::-1]
            requirements.extend(new_requirements)

            # Register the new requirements needed by req
            for new_requirement in new_requirements:
                required_by[new_requirement].add(req.project_name)
                req_extras[new_requirement] = req.extras

            processed[req] = True

        # return list of distros to activate
        return to_activate

    def find_plugins(self, plugin_env, full_env=None, installer=None,
                     fallback=True):
        """Find all activatable distributions in `plugin_env`

        Example usage::

            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            # add plugins+libs to sys.path
            map(working_set.add, distributions)
            # display errors
            print('Could not load', errors)

        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions.  If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.

        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.

        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies.  `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing
        the error that occurred. Usually this will be a
        ``DistributionNotFound`` or ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        # scan project names in alphabetic order
        plugin_projects.sort()

        error_info = {}
        distributions = {}

        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env

        shadow_set = self.__class__([])
        # put all our entries in shadow_set
        list(map(shadow_set.add, self))

        for project_name in plugin_projects:

            for dist in plugin_env[project_name]:

                req = [dist.as_requirement()]

                try:
                    resolvees = shadow_set.resolve(req, env, installer)

                except ResolutionError as v:
                    # save error info
                    error_info[dist] = v
                    if fallback:
                        # try the next older version of project
                        continue
                    else:
                        # give up on this project, keep going
                        break

                else:
                    list(map(shadow_set.add, resolvees))
                    distributions.update(dict.fromkeys(resolvees))

                    # success, no need to try any more versions of this project
                    break

        distributions = list(distributions)
        distributions.sort()

        return distributions, error_info

    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated

        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required.  The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))

        for dist in needed:
            self.add(dist)

        return needed

    def subscribe(self, callback, existing=True):
        """Invoke `callback` for all distributions

        If `existing=True` (default), call on all existing ones, as well.
        """
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        if not existing:
            return
        for dist in self:
            callback(dist)

    def _added_new(self, dist):
        # Notify every subscribed callback that `dist` was activated.
        for callback in self.callbacks:
            callback(dist)

    def __getstate__(self):
        # Return copies so later mutation of the working set does not
        # alter the captured state.
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )

    def __setstate__(self, e_k_b_c):
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]


class _ReqExtras(dict):
    """
    Map each requirement to the extras that demanded it.
    """

    def markers_pass(self, req):
        """
        Evaluate markers for req against each extra that demanded it.

        Return False if the req has a marker and fails evaluation.
        Otherwise, return True.
        """
        # Lazily evaluated: never consumed when req.marker is falsy.
        extra_evals = (
            req.marker.evaluate({'extra': extra})
            for extra in self.get(req, ()) + (None,)
        )
        return not req.marker or any(extra_evals)


class Environment(object):
    """Searchable snapshot of distributions on a search path"""

    def __init__(self, search_path=None, platform=get_supported_platform(),
                 python=PY_MAJOR):
        """Snapshot distributions available on a search path

        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.

        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with.  If
        unspecified, it defaults to the current platform.  `python` is an
        optional string naming the desired version of Python (e.g. ``'3.3'``);
        it defaults to the current version.

        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        self._distmap = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)

    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?

        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        return (self.python is None or dist.py_version is None
                or dist.py_version == self.python) \
            and compatible_platforms(dist.platform, self.platform)

    def remove(self, dist):
        """Remove `dist` from the environment"""
        self._distmap[dist.key].remove(dist)

    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment

        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.  Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path

        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)

    def __getitem__(self, project_name):
        """Return a newest-to-oldest list of distributions for `project_name`

        Uses case-insensitive `project_name` comparison, assuming all the
        project's distributions use their project's name converted to all
        lowercase as their key.
        """
        distribution_key = project_name.lower()
        return self._distmap.get(distribution_key, [])

    def add(self, dist):
        """Add `dist` if we ``can_add()`` it and it has not already been added
        """
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key, [])
            if dist not in dists:
                dists.append(dist)
                # keep newest-first ordering for __getitem__/best_match
                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)

    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`

        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active.  (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.)

        If a suitable distribution isn't active, this method returns the
        newest distribution in the environment that meets the ``Requirement``
        in `req`.  If no suitable distribution is found, and `installer` is
        supplied, then the result of calling the environment's
        ``obtain(req, installer)`` method will be returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        for dist in self[req.key]:
            if dist in req:
                return dist
        # try to download/install
        return self.obtain(req, installer)

    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)

        Obtain a distro that matches requirement (e.g. via download).  In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead.  This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)

    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]:
                yield key

    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other, Distribution):
            self.add(other)
        elif isinstance(other, Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self

    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # platform=None/python=None so the union accepts everything
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new


# XXX backward compatibility
AvailableDistributions = Environment


class ExtractionError(RuntimeError):
    """An error occurred extracting a resource

    The following attributes are available from instances of this exception:

    manager
        The resource manager that raised this exception

    cache_path
        The base directory for resource extraction
    original_error
        The exception instance that caused extraction to fail
    """


class ResourceManager:
    """Manage resource extraction and packages"""

    # base path for extractions; None means use get_default_cache()
    extraction_path = None

    def __init__(self):
        # target_path -> 1 for every path handed out by get_cache_path()
        self.cached_files = {}

    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)

    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )

    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )

    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )

    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )

    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )

    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""

        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()

        tmpl = textwrap.dedent("""
            Can't extract file(s) to egg cache

            The following error occurred while trying to extract file(s)
            to the Python egg cache:

              {old_exc}

            The Python egg cache directory is currently set to:

              {cache_path}

            Perhaps your account does not have write access to this directory?
            You can change the cache directory by setting the PYTHON_EGG_CACHE
            environment variable to point to an accessible directory.
            """).lstrip()
        err = ExtractionError(tmpl.format(**locals()))
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err

    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`

        The parent directory of the resulting path will be created if it does
        not already exist.  `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension.  `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.

        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        # NOTE(review): bare except intentionally catches everything here and
        # re-raises it as ExtractionError via extraction_error(); consider
        # narrowing to `except Exception` to avoid masking SystemExit, etc.
        except:
            self.extraction_error()

        self._warn_unsafe_extraction_path(extract_path)

        self.cached_files[target_path] = 1
        return target_path

    @staticmethod
    def _warn_unsafe_extraction_path(path):
        """
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.

        See Distribute #375 for more details.
        """
        if os.name == 'nt' and not path.startswith(os.environ['windir']):
            # On Windows, permissions are generally restrictive by default
            # and temp directories are not writable by other users, so
            # bypass the warning.
            return
        mode = os.stat(path).st_mode
        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
            msg = ("%s is writable by group/others and vulnerable to attack "
                   "when "
                   "used with get_resource_filename. Consider a more secure "
                   "location (set with .set_extraction_path or the "
                   "PYTHON_EGG_CACHE environment variable)." % path)
            warnings.warn(msg, UserWarning)

    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`

        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.

        Resource providers should call this method ONLY after successfully
        extracting a compressed resource.  They must NOT call it on resources
        that are already in the filesystem.

        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """

        if os.name == 'posix':
            # Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
            os.chmod(tempname, mode)

    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.

        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``. (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks.  See that routine's documentation for more
        details.)

        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``.  You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done.  There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.

        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )

        self.extraction_path = path

    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process. This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX


def get_default_cache():
    """
    Return the ``PYTHON_EGG_CACHE`` environment variable
    or a platform-relevant user cache dir for an app
    named "Python-Eggs".
    """
    return (
        os.environ.get('PYTHON_EGG_CACHE')
        or appdirs.user_cache_dir(appname='Python-Eggs')
    )


def safe_name(name):
    """Convert an arbitrary string to a standard distribution name

    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    return re.sub('[^A-Za-z0-9.]+', '-', name)


def safe_version(version):
    """
    Convert an arbitrary string to a standard version string
    """
    try:
        # normalize the version
        return str(packaging.version.Version(version))
    except packaging.version.InvalidVersion:
        # not PEP 440; sanitize into a legacy-safe form instead
        version = version.replace(' ', '.')
        return re.sub('[^A-Za-z0-9.]+', '-', version)


def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name

    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    """
    return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()


def to_filename(name):
    """Convert a project or version name to its filename-escaped form

    Any '-' characters are currently replaced with '_'.
    """
    return name.replace('-', '_')


def invalid_marker(text):
    """
    Validate text as a PEP 508 environment marker; return an exception
    if invalid or False otherwise.
    """
    try:
        evaluate_marker(text)
    except SyntaxError as e:
        # strip location info that is meaningless for a marker string
        e.filename = None
        e.lineno = None
        return e
    return False


def evaluate_marker(text, extra=None):
    """
    Evaluate a PEP 508 environment marker.
    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module. """ try: marker = packaging.markers.Marker(text) return marker.evaluate() except packaging.markers.InvalidMarker as e: raise SyntaxError(e) class NullProvider: """Try to implement resources and metadata for arbitrary PEP 302 loaders""" egg_name = None egg_info = None loader = None def __init__(self, module): self.loader = getattr(module, '__loader__', None) self.module_path = os.path.dirname(getattr(module, '__file__', '')) def get_resource_filename(self, manager, resource_name): return self._fn(self.module_path, resource_name) def get_resource_stream(self, manager, resource_name): return io.BytesIO(self.get_resource_string(manager, resource_name)) def get_resource_string(self, manager, resource_name): return self._get(self._fn(self.module_path, resource_name)) def has_resource(self, resource_name): return self._has(self._fn(self.module_path, resource_name)) def has_metadata(self, name): return self.egg_info and self._has(self._fn(self.egg_info, name)) def get_metadata(self, name): if not self.egg_info: return "" value = self._get(self._fn(self.egg_info, name)) return value.decode('utf-8') if six.PY3 else value def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) def resource_isdir(self, resource_name): return self._isdir(self._fn(self.module_path, resource_name)) def metadata_isdir(self, name): return self.egg_info and self._isdir(self._fn(self.egg_info, name)) def resource_listdir(self, resource_name): return self._listdir(self._fn(self.module_path, resource_name)) def metadata_listdir(self, name): if self.egg_info: return self._listdir(self._fn(self.egg_info, name)) return [] def run_script(self, script_name, namespace): script = 'scripts/' + script_name if not self.has_metadata(script): raise ResolutionError("No script named %r" % script_name) script_text = self.get_metadata(script).replace('\r\n', '\n') script_text = script_text.replace('\r', '\n') script_filename = 
self._fn(self.egg_info, script) namespace['__file__'] = script_filename if os.path.exists(script_filename): source = open(script_filename).read() code = compile(source, script_filename, 'exec') exec(code, namespace, namespace) else: from linecache import cache cache[script_filename] = ( len(script_text), 0, script_text.split('\n'), script_filename ) script_code = compile(script_text, script_filename, 'exec') exec(script_code, namespace, namespace) def _has(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _isdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _listdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _fn(self, base, resource_name): if resource_name: return os.path.join(base, *resource_name.split('/')) return base def _get(self, path): if hasattr(self.loader, 'get_data'): return self.loader.get_data(path) raise NotImplementedError( "Can't perform this operation for loaders without 'get_data()'" ) register_loader_type(object, NullProvider) class EggProvider(NullProvider): """Provider based on a virtual filesystem""" def __init__(self, module): NullProvider.__init__(self, module) self._setup_prefix() def _setup_prefix(self): # we assume here that our metadata may be nested inside a "basket" # of multiple eggs; that's why we use module_path instead of .archive path = self.module_path old = None while path != old: if _is_unpacked_egg(path): self.egg_name = os.path.basename(path) self.egg_info = os.path.join(path, 'EGG-INFO') self.egg_root = path break old = path path, base = os.path.split(path) class DefaultProvider(EggProvider): """Provides access to package resources in the filesystem""" def _has(self, path): return os.path.exists(path) def _isdir(self, path): return os.path.isdir(path) def _listdir(self, path): return os.listdir(path) def get_resource_stream(self, manager, 
resource_name): return open(self._fn(self.module_path, resource_name), 'rb') def _get(self, path): with open(path, 'rb') as stream: return stream.read() @classmethod def _register(cls): loader_cls = getattr(importlib_machinery, 'SourceFileLoader', type(None)) register_loader_type(loader_cls, cls) DefaultProvider._register() class EmptyProvider(NullProvider): """Provider that returns nothing for all requests""" _isdir = _has = lambda self, path: False _get = lambda self, path: '' _listdir = lambda self, path: [] module_path = None def __init__(self): pass empty_provider = EmptyProvider() class ZipManifests(dict): """ zip manifest builder """ @classmethod def build(cls, path): """ Build a dictionary similar to the zipimport directory caches, except instead of tuples, store ZipInfo objects. Use a platform-specific path separator (os.sep) for the path keys for compatibility with pypy on Windows. """ with ContextualZipFile(path) as zfile: items = ( ( name.replace('/', os.sep), zfile.getinfo(name), ) for name in zfile.namelist() ) return dict(items) load = build class MemoizedZipManifests(ZipManifests): """ Memoized zipfile manifests. """ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') def load(self, path): """ Load a manifest at path or return a suitable manifest already loaded. 
""" path = os.path.normpath(path) mtime = os.stat(path).st_mtime if path not in self or self[path].mtime != mtime: manifest = self.build(path) self[path] = self.manifest_mod(manifest, mtime) return self[path].manifest class ContextualZipFile(zipfile.ZipFile): """ Supplement ZipFile class to support context manager for Python 2.6 """ def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def __new__(cls, *args, **kwargs): """ Construct a ZipFile or ContextualZipFile as appropriate """ if hasattr(zipfile.ZipFile, '__exit__'): return zipfile.ZipFile(*args, **kwargs) return super(ContextualZipFile, cls).__new__(cls) class ZipProvider(EggProvider): """Resource support for zips and eggs""" eagers = None _zip_manifests = MemoizedZipManifests() def __init__(self, module): EggProvider.__init__(self, module) self.zip_pre = self.loader.archive + os.sep def _zipinfo_name(self, fspath): # Convert a virtual filename (full path to file) into a zipfile subpath # usable with the zipimport directory cache for our target archive if fspath.startswith(self.zip_pre): return fspath[len(self.zip_pre):] raise AssertionError( "%s is not a subpath of %s" % (fspath, self.zip_pre) ) def _parts(self, zip_path): # Convert a zipfile subpath into an egg-relative path part list. 
# pseudo-fs path fspath = self.zip_pre + zip_path if fspath.startswith(self.egg_root + os.sep): return fspath[len(self.egg_root) + 1:].split(os.sep) raise AssertionError( "%s is not a subpath of %s" % (fspath, self.egg_root) ) @property def zipinfo(self): return self._zip_manifests.load(self.loader.archive) def get_resource_filename(self, manager, resource_name): if not self.egg_name: raise NotImplementedError( "resource_filename() only supported for .egg, not .zip" ) # no need to lock for extraction, since we use temp names zip_path = self._resource_to_zip(resource_name) eagers = self._get_eager_resources() if '/'.join(self._parts(zip_path)) in eagers: for name in eagers: self._extract_resource(manager, self._eager_to_zip(name)) return self._extract_resource(manager, zip_path) @staticmethod def _get_date_and_size(zip_stat): size = zip_stat.file_size # ymdhms+wday, yday, dst date_time = zip_stat.date_time + (0, 0, -1) # 1980 offset already done timestamp = time.mktime(date_time) return timestamp, size def _extract_resource(self, manager, zip_path): if zip_path in self._index(): for name in self._index()[zip_path]: last = self._extract_resource( manager, os.path.join(zip_path, name) ) # return the extracted directory name return os.path.dirname(last) timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not WRITE_SUPPORT: raise IOError('"os.rename" and "os.unlink" are not supported ' 'on this platform') try: real_path = manager.get_cache_path( self.egg_name, self._parts(zip_path) ) if self._is_current(real_path, zip_path): return real_path outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path)) os.write(outf, self.loader.get_data(zip_path)) os.close(outf) utime(tmpnam, (timestamp, timestamp)) manager.postprocess(tmpnam, real_path) try: rename(tmpnam, real_path) except os.error: if os.path.isfile(real_path): if self._is_current(real_path, zip_path): # the file became current since it was checked above, # so proceed. 
return real_path # Windows, del old file and retry elif os.name == 'nt': unlink(real_path) rename(tmpnam, real_path) return real_path raise except os.error: # report a user-friendly error manager.extraction_error() return real_path def _is_current(self, file_path, zip_path): """ Return True if the file_path is current for this zip_path """ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not os.path.isfile(file_path): return False stat = os.stat(file_path) if stat.st_size != size or stat.st_mtime != timestamp: return False # check that the contents match zip_contents = self.loader.get_data(zip_path) with open(file_path, 'rb') as f: file_contents = f.read() return zip_contents == file_contents def _get_eager_resources(self): if self.eagers is None: eagers = [] for name in ('native_libs.txt', 'eager_resources.txt'): if self.has_metadata(name): eagers.extend(self.get_metadata_lines(name)) self.eagers = eagers return self.eagers def _index(self): try: return self._dirindex except AttributeError: ind = {} for path in self.zipinfo: parts = path.split(os.sep) while parts: parent = os.sep.join(parts[:-1]) if parent in ind: ind[parent].append(parts[-1]) break else: ind[parent] = [parts.pop()] self._dirindex = ind return ind def _has(self, fspath): zip_path = self._zipinfo_name(fspath) return zip_path in self.zipinfo or zip_path in self._index() def _isdir(self, fspath): return self._zipinfo_name(fspath) in self._index() def _listdir(self, fspath): return list(self._index().get(self._zipinfo_name(fspath), ())) def _eager_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.egg_root, resource_name)) def _resource_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.module_path, resource_name)) register_loader_type(zipimport.zipimporter, ZipProvider) class FileMetadata(EmptyProvider): """Metadata handler for standalone PKG-INFO files Usage:: metadata = FileMetadata("/path/to/PKG-INFO") This provider rejects all data and 
metadata requests except for PKG-INFO, which is treated as existing, and will be the contents of the file at the provided location. """ def __init__(self, path): self.path = path def has_metadata(self, name): return name == 'PKG-INFO' and os.path.isfile(self.path) def get_metadata(self, name): if name != 'PKG-INFO': raise KeyError("No metadata except PKG-INFO is available") with io.open(self.path, encoding='utf-8', errors="replace") as f: metadata = f.read() self._warn_on_replacement(metadata) return metadata def _warn_on_replacement(self, metadata): # Python 2.6 and 3.2 compat for: replacement_char = '�' replacement_char = b'\xef\xbf\xbd'.decode('utf-8') if replacement_char in metadata: tmpl = "{self.path} could not be properly decoded in UTF-8" msg = tmpl.format(**locals()) warnings.warn(msg) def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) class PathMetadata(DefaultProvider): """Metadata provider for egg directories Usage:: # Development eggs: egg_info = "/path/to/PackageName.egg-info" base_dir = os.path.dirname(egg_info) metadata = PathMetadata(base_dir, egg_info) dist_name = os.path.splitext(os.path.basename(egg_info))[0] dist = Distribution(basedir, project_name=dist_name, metadata=metadata) # Unpacked egg directories: egg_path = "/path/to/PackageName-ver-pyver-etc.egg" metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) dist = Distribution.from_filename(egg_path, metadata=metadata) """ def __init__(self, path, egg_info): self.module_path = path self.egg_info = egg_info class EggMetadata(ZipProvider): """Metadata provider for .egg files""" def __init__(self, importer): """Create a metadata provider from a zipimporter""" self.zip_pre = importer.archive + os.sep self.loader = importer if importer.prefix: self.module_path = os.path.join(importer.archive, importer.prefix) else: self.module_path = importer.archive self._setup_prefix() _declare_state('dict', _distribution_finders={}) def register_finder(importer_type, 
distribution_finder): """Register `distribution_finder` to find distributions in sys.path items `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `distribution_finder` is a callable that, passed a path item and the importer instance, yields ``Distribution`` instances found on that path item. See ``pkg_resources.find_on_path`` for an example.""" _distribution_finders[importer_type] = distribution_finder def find_distributions(path_item, only=False): """Yield distributions accessible via `path_item`""" importer = get_importer(path_item) finder = _find_adapter(_distribution_finders, importer) return finder(importer, path_item, only) def find_eggs_in_zip(importer, path_item, only=False): """ Find eggs in zip files; possibly multiple nested eggs. """ if importer.archive.endswith('.whl'): # wheels are not supported with this finder # they don't have PKG-INFO metadata, and won't ever contain eggs return metadata = EggMetadata(importer) if metadata.has_metadata('PKG-INFO'): yield Distribution.from_filename(path_item, metadata=metadata) if only: # don't yield nested distros return for subitem in metadata.resource_listdir('/'): if _is_unpacked_egg(subitem): subpath = os.path.join(path_item, subitem) for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath): yield dist register_finder(zipimport.zipimporter, find_eggs_in_zip) def find_nothing(importer, path_item, only=False): return () register_finder(object, find_nothing) def _by_version_descending(names): """ Given a list of filenames, return them in descending order by version number. 
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' >>> _by_version_descending(names) ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar'] >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' >>> _by_version_descending(names) ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' >>> _by_version_descending(names) ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] """ def _by_version(name): """ Parse each component of the filename """ name, ext = os.path.splitext(name) parts = itertools.chain(name.split('-'), [ext]) return [packaging.version.parse(part) for part in parts] return sorted(names, key=_by_version, reverse=True) def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) if os.path.isdir(path_item) and os.access(path_item, os.R_OK): if _is_unpacked_egg(path_item): yield Distribution.from_filename( path_item, metadata=PathMetadata( path_item, os.path.join(path_item, 'EGG-INFO') ) ) else: # scan for .egg and .egg-info in directory path_item_entries = _by_version_descending(os.listdir(path_item)) for entry in path_item_entries: lower = entry.lower() if lower.endswith('.egg-info') or lower.endswith('.dist-info'): fullpath = os.path.join(path_item, entry) if os.path.isdir(fullpath): # egg-info directory, allow getting metadata if len(os.listdir(fullpath)) == 0: # Empty egg directory, skip. 
continue metadata = PathMetadata(path_item, fullpath) else: metadata = FileMetadata(fullpath) yield Distribution.from_location( path_item, entry, metadata, precedence=DEVELOP_DIST ) elif not only and _is_unpacked_egg(entry): dists = find_distributions(os.path.join(path_item, entry)) for dist in dists: yield dist elif not only and lower.endswith('.egg-link'): with open(os.path.join(path_item, entry)) as entry_file: entry_lines = entry_file.readlines() for line in entry_lines: if not line.strip(): continue path = os.path.join(path_item, line.rstrip()) dists = find_distributions(path) for item in dists: yield item break register_finder(pkgutil.ImpImporter, find_on_path) if hasattr(importlib_machinery, 'FileFinder'): register_finder(importlib_machinery.FileFinder, find_on_path) _declare_state('dict', _namespace_handlers={}) _declare_state('dict', _namespace_packages={}) def register_namespace_handler(importer_type, namespace_handler): """Register `namespace_handler` to declare namespace packages `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `namespace_handler` is a callable like this:: def namespace_handler(importer, path_entry, moduleName, module): # return a path_entry to use for child packages Namespace handlers are only called if the importer object has already agreed that it can handle the relevant path item, and they should only return a subpath if the module __path__ does not already contain an equivalent subpath. For an example namespace handler, see ``pkg_resources.file_ns_handler``. 
""" _namespace_handlers[importer_type] = namespace_handler def _handle_ns(packageName, path_item): """Ensure that named package includes a subpath of path_item (if needed)""" importer = get_importer(path_item) if importer is None: return None loader = importer.find_module(packageName) if loader is None: return None module = sys.modules.get(packageName) if module is None: module = sys.modules[packageName] = types.ModuleType(packageName) module.__path__ = [] _set_parent_ns(packageName) elif not hasattr(module, '__path__'): raise TypeError("Not a package:", packageName) handler = _find_adapter(_namespace_handlers, importer) subpath = handler(importer, path_item, packageName, module) if subpath is not None: path = module.__path__ path.append(subpath) loader.load_module(packageName) _rebuild_mod_path(path, packageName, module) return subpath def _rebuild_mod_path(orig_path, package_name, module): """ Rebuild module.__path__ ensuring that all entries are ordered corresponding to their sys.path order """ sys_path = [_normalize_cached(p) for p in sys.path] def safe_sys_path_index(entry): """ Workaround for #520 and #513. """ try: return sys_path.index(entry) except ValueError: return float('inf') def position_in_sys_path(path): """ Return the ordinal of the path based on its position in sys.path """ path_parts = path.split(os.sep) module_parts = package_name.count('.') + 1 parts = path_parts[:-module_parts] return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) orig_path.sort(key=position_in_sys_path) module.__path__[:] = [_normalize_cached(p) for p in orig_path] def declare_namespace(packageName): """Declare that package 'packageName' is a namespace package""" _imp.acquire_lock() try: if packageName in _namespace_packages: return path, parent = sys.path, None if '.' 
in packageName: parent = '.'.join(packageName.split('.')[:-1]) declare_namespace(parent) if parent not in _namespace_packages: __import__(parent) try: path = sys.modules[parent].__path__ except AttributeError: raise TypeError("Not a package:", parent) # Track what packages are namespaces, so when new path items are added, # they can be updated _namespace_packages.setdefault(parent, []).append(packageName) _namespace_packages.setdefault(packageName, []) for path_item in path: # Ensure all the parent's path items are reflected in the child, # if they apply _handle_ns(packageName, path_item) finally: _imp.release_lock() def fixup_namespace_packages(path_item, parent=None): """Ensure that previously-declared namespace packages include path_item""" _imp.acquire_lock() try: for package in _namespace_packages.get(parent, ()): subpath = _handle_ns(package, path_item) if subpath: fixup_namespace_packages(subpath, package) finally: _imp.release_lock() def file_ns_handler(importer, path_item, packageName, module): """Compute an ns-package subpath for a filesystem or zipfile importer""" subpath = os.path.join(path_item, packageName.split('.')[-1]) normalized = _normalize_cached(subpath) for item in module.__path__: if _normalize_cached(item) == normalized: break else: # Only return the path if it's not already there return subpath register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) register_namespace_handler(zipimport.zipimporter, file_ns_handler) if hasattr(importlib_machinery, 'FileFinder'): register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) def null_ns_handler(importer, path_item, packageName, module): return None register_namespace_handler(object, null_ns_handler) def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(filename)) def _normalize_cached(filename, _cache={}): try: return _cache[filename] except KeyError: _cache[filename] = result = 
normalize_path(filename) return result def _is_unpacked_egg(path): """ Determine if given path appears to be an unpacked egg. """ return ( path.lower().endswith('.egg') ) def _set_parent_ns(packageName): parts = packageName.split('.') name = parts.pop() if parts: parent = '.'.join(parts) setattr(sys.modules[parent], name, sys.modules[packageName]) def yield_lines(strs): """Yield non-empty/non-comment lines of a string or sequence""" if isinstance(strs, six.string_types): for s in strs.splitlines(): s = s.strip() # skip blank lines/comments if s and not s.startswith('#'): yield s else: for ss in strs: for s in yield_lines(ss): yield s MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r""" (?P[^-]+) ( -(?P[^-]+) ( -py(?P[^-]+) ( -(?P.+) )? )? )? """, re.VERBOSE | re.IGNORECASE, ).match class EntryPoint(object): """Object representing an advertised importable object""" def __init__(self, name, module_name, attrs=(), extras=(), dist=None): if not MODULE(module_name): raise ValueError("Invalid module name", module_name) self.name = name self.module_name = module_name self.attrs = tuple(attrs) self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras self.dist = dist def __str__(self): s = "%s = %s" % (self.name, self.module_name) if self.attrs: s += ':' + '.'.join(self.attrs) if self.extras: s += ' [%s]' % ','.join(self.extras) return s def __repr__(self): return "EntryPoint.parse(%r)" % str(self) def load(self, require=True, *args, **kwargs): """ Require packages for this EntryPoint, then resolve it. """ if not require or args or kwargs: warnings.warn( "Parameters to load are deprecated. Call .resolve and " ".require separately.", DeprecationWarning, stacklevel=2, ) if require: self.require(*args, **kwargs) return self.resolve() def resolve(self): """ Resolve the entry point from its module and attrs. 
""" module = __import__(self.module_name, fromlist=['__name__'], level=0) try: return functools.reduce(getattr, self.attrs, module) except AttributeError as exc: raise ImportError(str(exc)) def require(self, env=None, installer=None): if self.extras and not self.dist: raise UnknownExtra("Can't require() without a distribution", self) reqs = self.dist.requires(self.extras) items = working_set.resolve(reqs, env, installer) list(map(working_set.add, items)) pattern = re.compile( r'\s*' r'(?P.+?)\s*' r'=\s*' r'(?P[\w.]+)\s*' r'(:\s*(?P[\w.]+))?\s*' r'(?P\[.*\])?\s*$' ) @classmethod def parse(cls, src, dist=None): """Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1, extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional """ m = cls.pattern.match(src) if not m: msg = "EntryPoint must be in 'name=module:attrs [extras]' format" raise ValueError(msg, src) res = m.groupdict() extras = cls._parse_extras(res['extras']) attrs = res['attr'].split('.') if res['attr'] else () return cls(res['name'], res['module'], attrs, extras, dist) @classmethod def _parse_extras(cls, extras_spec): if not extras_spec: return () req = Requirement.parse('x' + extras_spec) if req.specs: raise ValueError() return req.extras @classmethod def parse_group(cls, group, lines, dist=None): """Parse an entry point group""" if not MODULE(group): raise ValueError("Invalid group name", group) this = {} for line in yield_lines(lines): ep = cls.parse(line, dist) if ep.name in this: raise ValueError("Duplicate entry point", group, ep.name) this[ep.name] = ep return this @classmethod def parse_map(cls, data, dist=None): """Parse a map of entry point groups""" if isinstance(data, dict): data = data.items() else: data = split_sections(data) maps = {} for group, lines in data: if group is None: if not lines: continue raise ValueError("Entry points must be listed in groups") group = group.strip() 
if group in maps: raise ValueError("Duplicate group name", group) maps[group] = cls.parse_group(group, lines, dist) return maps def _remove_md5_fragment(location): if not location: return '' parsed = urllib.parse.urlparse(location) if parsed[-1].startswith('md5='): return urllib.parse.urlunparse(parsed[:-1] + ('',)) return location def _version_from_file(lines): """ Given an iterable of lines from a Metadata file, return the value of the Version field, if present, or None otherwise. """ is_version_line = lambda line: line.lower().startswith('version:') version_lines = filter(is_version_line, lines) line = next(iter(version_lines), '') _, _, value = line.partition(':') return safe_version(value.strip()) or None class Distribution(object): """Wrap an actual or potential sys.path entry w/metadata""" PKG_INFO = 'PKG-INFO' def __init__(self, location=None, metadata=None, project_name=None, version=None, py_version=PY_MAJOR, platform=None, precedence=EGG_DIST): self.project_name = safe_name(project_name or 'Unknown') if version is not None: self._version = safe_version(version) self.py_version = py_version self.platform = platform self.location = location self.precedence = precedence self._provider = metadata or empty_provider @classmethod def from_location(cls, location, basename, metadata=None, **kw): project_name, version, py_version, platform = [None] * 4 basename, ext = os.path.splitext(basename) if ext.lower() in _distributionImpl: cls = _distributionImpl[ext.lower()] match = EGG_NAME(basename) if match: project_name, version, py_version, platform = match.group( 'name', 'ver', 'pyver', 'plat' ) return cls( location, metadata, project_name=project_name, version=version, py_version=py_version, platform=platform, **kw )._reload_version() def _reload_version(self): return self @property def hashcmp(self): return ( self.parsed_version, self.precedence, self.key, _remove_md5_fragment(self.location), self.py_version or '', self.platform or '', ) def __hash__(self): return 
hash(self.hashcmp) def __lt__(self, other): return self.hashcmp < other.hashcmp def __le__(self, other): return self.hashcmp <= other.hashcmp def __gt__(self, other): return self.hashcmp > other.hashcmp def __ge__(self, other): return self.hashcmp >= other.hashcmp def __eq__(self, other): if not isinstance(other, self.__class__): # It's not a Distribution, so they are not equal return False return self.hashcmp == other.hashcmp def __ne__(self, other): return not self == other # These properties have to be lazy so that we don't have to load any # metadata until/unless it's actually needed. (i.e., some distributions # may not know their name or version without loading PKG-INFO) @property def key(self): try: return self._key except AttributeError: self._key = key = self.project_name.lower() return key @property def parsed_version(self): if not hasattr(self, "_parsed_version"): self._parsed_version = parse_version(self.version) return self._parsed_version def _warn_legacy_version(self): LV = packaging.version.LegacyVersion is_legacy = isinstance(self._parsed_version, LV) if not is_legacy: return # While an empty version is technically a legacy version and # is not a valid PEP 440 version, it's also unlikely to # actually come from someone and instead it is more likely that # it comes from setuptools attempting to parse a filename and # including it in the list. So for that we'll gate this warning # on if the version is anything at all or not. if not self.version: return tmpl = textwrap.dedent(""" '{project_name} ({version})' is being parsed as a legacy, non PEP 440, version. You may find odd behavior and sort order. In particular it will be sorted as less than 0.0. It is recommended to migrate to PEP 440 compatible versions. 
""").strip().replace('\n', ' ') warnings.warn(tmpl.format(**vars(self)), PEP440Warning) @property def version(self): try: return self._version except AttributeError: version = _version_from_file(self._get_metadata(self.PKG_INFO)) if version is None: tmpl = "Missing 'Version:' header and/or %s file" raise ValueError(tmpl % self.PKG_INFO, self) return version @property def _dep_map(self): try: return self.__dep_map except AttributeError: dm = self.__dep_map = {None: []} for name in 'requires.txt', 'depends.txt': for extra, reqs in split_sections(self._get_metadata(name)): if extra: if ':' in extra: extra, marker = extra.split(':', 1) if invalid_marker(marker): # XXX warn reqs = [] elif not evaluate_marker(marker): reqs = [] extra = safe_extra(extra) or None dm.setdefault(extra, []).extend(parse_requirements(reqs)) return dm def requires(self, extras=()): """List of Requirements needed for this distro if `extras` are used""" dm = self._dep_map deps = [] deps.extend(dm.get(None, ())) for ext in extras: try: deps.extend(dm[safe_extra(ext)]) except KeyError: raise UnknownExtra( "%s has no such extra feature %r" % (self, ext) ) return deps def _get_metadata(self, name): if self.has_metadata(name): for line in self.get_metadata_lines(name): yield line def activate(self, path=None, replace=False): """Ensure distribution is importable on `path` (default=sys.path)""" if path is None: path = sys.path self.insert_on(path, replace=replace) if path is sys.path: fixup_namespace_packages(self.location) for pkg in self._get_metadata('namespace_packages.txt'): if pkg in sys.modules: declare_namespace(pkg) def egg_name(self): """Return what this distribution's standard .egg filename should be""" filename = "%s-%s-py%s" % ( to_filename(self.project_name), to_filename(self.version), self.py_version or PY_MAJOR ) if self.platform: filename += '-' + self.platform return filename def __repr__(self): if self.location: return "%s (%s)" % (self, self.location) else: return str(self) def 
__str__(self): try: version = getattr(self, 'version', None) except ValueError: version = None version = version or "[unknown version]" return "%s %s" % (self.project_name, version) def __getattr__(self, attr): """Delegate all unrecognized public attributes to .metadata provider""" if attr.startswith('_'): raise AttributeError(attr) return getattr(self._provider, attr) @classmethod def from_filename(cls, filename, metadata=None, **kw): return cls.from_location( _normalize_cached(filename), os.path.basename(filename), metadata, **kw ) def as_requirement(self): """Return a ``Requirement`` that matches this distribution exactly""" if isinstance(self.parsed_version, packaging.version.Version): spec = "%s==%s" % (self.project_name, self.parsed_version) else: spec = "%s===%s" % (self.project_name, self.parsed_version) return Requirement.parse(spec) def load_entry_point(self, group, name): """Return the `name` entry point of `group` or raise ImportError""" ep = self.get_entry_info(group, name) if ep is None: raise ImportError("Entry point %r not found" % ((group, name),)) return ep.load() def get_entry_map(self, group=None): """Return the entry point map for `group`, or the full entry map""" try: ep_map = self._ep_map except AttributeError: ep_map = self._ep_map = EntryPoint.parse_map( self._get_metadata('entry_points.txt'), self ) if group is not None: return ep_map.get(group, {}) return ep_map def get_entry_info(self, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return self.get_entry_map(group).get(name) def insert_on(self, path, loc=None, replace=False): """Ensure self.location is on path If replace=False (default): - If location is already in path anywhere, do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent. - Else: add to the end of path. If replace=True: - If location is already on path anywhere (not eggs) or higher priority than its parent (eggs) do nothing. 
- Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent, removing any lower-priority entries. - Else: add it to the front of path. """ loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath = [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: if replace: break else: # don't modify path (even removing duplicates) if found and not replace return elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory # UNLESS it's already been added to sys.path and replace=False if (not replace) and nloc in npath[p:]: return if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() if replace: path.insert(0, loc) else: path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p + 1) except ValueError: break else: del npath[np], path[np] # ha! 
p = np return def check_version_conflict(self): if self.key == 'setuptools': # ignore the inevitable setuptools self-conflicts :( return nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) loc = normalize_path(self.location) for modname in self._get_metadata('top_level.txt'): if (modname not in sys.modules or modname in nsp or modname in _namespace_packages): continue if modname in ('pkg_resources', 'setuptools', 'site'): continue fn = getattr(sys.modules[modname], '__file__', None) if fn and (normalize_path(fn).startswith(loc) or fn.startswith(self.location)): continue issue_warning( "Module %s was already imported from %s, but %s is being added" " to sys.path" % (modname, fn, self.location), ) def has_version(self): try: self.version except ValueError: issue_warning("Unbuilt egg for " + repr(self)) return False return True def clone(self, **kw): """Copy this distribution, substituting in any changed keyword args""" names = 'project_name version py_version platform location precedence' for attr in names.split(): kw.setdefault(attr, getattr(self, attr, None)) kw.setdefault('metadata', self._provider) return self.__class__(**kw) @property def extras(self): return [dep for dep in self._dep_map if dep] class EggInfoDistribution(Distribution): def _reload_version(self): """ Packages installed by distutils (e.g. numpy or scipy), which uses an old safe_version, and so their version numbers can get mangled when converted to filenames (e.g., 1.11.0.dev0+2329eae to 1.11.0.dev0_2329eae). These distributions will not be parsed properly downstream by Distribution and safe_version, so take an extra step and try to get the version number from the metadata file itself instead of the filename. 
""" md_version = _version_from_file(self._get_metadata(self.PKG_INFO)) if md_version: self._version = md_version return self class DistInfoDistribution(Distribution): """Wrap an actual or potential sys.path entry w/metadata, .dist-info style""" PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @property def _parsed_pkg_info(self): """Parse and cache metadata""" try: return self._pkg_info except AttributeError: metadata = self.get_metadata(self.PKG_INFO) self._pkg_info = email.parser.Parser().parsestr(metadata) return self._pkg_info @property def _dep_map(self): try: return self.__dep_map except AttributeError: self.__dep_map = self._compute_dependencies() return self.__dep_map def _compute_dependencies(self): """Recompute this distribution's dependencies.""" dm = self.__dep_map = {None: []} reqs = [] # Including any condition expressions for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: reqs.extend(parse_requirements(req)) def reqs_for_extra(extra): for req in reqs: if not req.marker or req.marker.evaluate({'extra': extra}): yield req common = frozenset(reqs_for_extra(None)) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: s_extra = safe_extra(extra.strip()) dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common) return dm _distributionImpl = { '.egg': Distribution, '.egg-info': EggInfoDistribution, '.dist-info': DistInfoDistribution, } def issue_warning(*args, **kw): level = 1 g = globals() try: # find the first stack frame that is *not* code in # the pkg_resources module, to use for the warning while sys._getframe(level).f_globals is g: level += 1 except ValueError: pass warnings.warn(stacklevel=level + 1, *args, **kw) class RequirementParseError(ValueError): def __str__(self): return ' '.join(self.args) def parse_requirements(strs): """Yield ``Requirement`` objects for each specification in `strs` `strs` must be a string, or a (possibly-nested) iterable thereof. 
""" # create a steppable iterator, so we can handle \-continuations lines = iter(yield_lines(strs)) for line in lines: # Drop comments -- a hash without a space may be in a URL. if ' #' in line: line = line[:line.find(' #')] # If there is a line continuation, drop it, and append the next line. if line.endswith('\\'): line = line[:-2].strip() line += next(lines) yield Requirement(line) class Requirement(packaging.requirements.Requirement): def __init__(self, requirement_string): """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" try: super(Requirement, self).__init__(requirement_string) except packaging.requirements.InvalidRequirement as e: raise RequirementParseError(str(e)) self.unsafe_name = self.name project_name = safe_name(self.name) self.project_name, self.key = project_name, project_name.lower() self.specs = [ (spec.operator, spec.version) for spec in self.specifier] self.extras = tuple(map(safe_extra, self.extras)) self.hashCmp = ( self.key, self.specifier, frozenset(self.extras), str(self.marker) if self.marker else None, ) self.__hash = hash(self.hashCmp) def __eq__(self, other): return ( isinstance(other, Requirement) and self.hashCmp == other.hashCmp ) def __ne__(self, other): return not self == other def __contains__(self, item): if isinstance(item, Distribution): if item.key != self.key: return False item = item.version # Allow prereleases always in order to match the previous behavior of # this method. In the future this should be smarter and follow PEP 440 # more accurately. 
return self.specifier.contains(item, prereleases=True) def __hash__(self): return self.__hash def __repr__(self): return "Requirement.parse(%r)" % str(self) @staticmethod def parse(s): req, = parse_requirements(s) return req def _get_mro(cls): """Get an mro for a type or classic class""" if not isinstance(cls, type): class cls(cls, object): pass return cls.__mro__[1:] return cls.__mro__ def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" for t in _get_mro(getattr(ob, '__class__', type(ob))): if t in registry: return registry[t] def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) if not os.path.isdir(dirname): os.makedirs(dirname) def _bypass_ensure_directory(path): """Sandbox-bypassing version of ensure_directory()""" if not WRITE_SUPPORT: raise IOError('"os.mkdir" not supported on this platform.') dirname, filename = split(path) if dirname and filename and not isdir(dirname): _bypass_ensure_directory(dirname) mkdir(dirname, 0o755) def split_sections(s): """Split a string or iterable thereof into (section, content) pairs Each ``section`` is a stripped version of the section header ("[section]") and each ``content`` is a list of stripped lines excluding blank lines and comment-only lines. If there are any such lines before the first section header, they're returned in a first ``section`` of ``None``. 
""" section = None content = [] for line in yield_lines(s): if line.startswith("["): if line.endswith("]"): if section or content: yield section, content section = line[1:-1].strip() content = [] else: raise ValueError("Invalid section heading", line) else: content.append(line) # wrap up last segment yield section, content def _mkstemp(*args, **kw): old_open = os.open try: # temporarily bypass sandboxing os.open = os_open return tempfile.mkstemp(*args, **kw) finally: # and then put it back os.open = old_open # Silence the PEP440Warning by default, so that end users don't get hit by it # randomly just because they use pkg_resources. We want to append the rule # because we want earlier uses of filterwarnings to take precedence over this # one. warnings.filterwarnings("ignore", category=PEP440Warning, append=True) # from jaraco.functools 1.3 def _call_aside(f, *args, **kwargs): f(*args, **kwargs) return f @_call_aside def _initialize(g=globals()): "Set up global resource manager (deliberately not state-saved)" manager = ResourceManager() g['_manager'] = manager for name in dir(manager): if not name.startswith('_'): g[name] = getattr(manager, name) @_call_aside def _initialize_master_working_set(): """ Prepare the master working set and make the ``require()`` API available. This function has explicit effects on the global state of pkg_resources. It is intended to be invoked once at the initialization of this module. Invocation by other packages is unsupported and done at their own risk. """ working_set = WorkingSet._build_master() _declare_state('object', working_set=working_set) require = working_set.require iter_entry_points = working_set.iter_entry_points add_activation_listener = working_set.subscribe run_script = working_set.run_script # backward compatibility run_main = run_script # Activate all distributions already on sys.path with replace=False and # ensure that all distributions added to the working set in the future # (e.g. 
by calling ``require()``) will get activated as well, # with higher priority (replace=True). dist = None # ensure dist is defined for del dist below for dist in working_set: dist.activate(replace=False) del dist add_activation_listener(lambda dist: dist.activate(replace=True), existing=False) working_set.entries = [] # match order list(map(working_set.add_entry, sys.path)) globals().update(locals()) PK.e[s{ re-vendor.pycnu[ abc@sddlZddlZddlZddlZddlZejjejjeZ dZ dZ dZ e dkreejdkre nejddkre qejdd kre qe ndS( iNcCsdGHtjddS(Ns"Usage: re-vendor.py [clean|vendor]i(tsystexit(((s9/usr/lib/python2.7/site-packages/pip/_vendor/re-vendor.pytusage scCsqxNtjtD]=}tjjt|}tjj|rtj|qqWtjtjjtddS(Nssix.py( tostlistdirtheretpathtjointisdirtshutiltrmtreetunlink(tfntdirname((s9/usr/lib/python2.7/site-packages/pip/_vendor/re-vendor.pytclean s cCsGtjddtddgx$tjdD]}tj|q,WdS(Ntinstalls-ts-rs vendor.txts *.egg-info(tpiptmainRtglobR R (R ((s9/usr/lib/python2.7/site-packages/pip/_vendor/re-vendor.pytvendorst__main__iiRR(RRRRR RtabspathR t__file__RRRRt__name__tlentargv(((s9/usr/lib/python2.7/site-packages/pip/_vendor/re-vendor.pyts            PK.e[$Uhtml5lib/_trie/datrie.pynu[from __future__ import absolute_import, division, unicode_literals from datrie import Trie as DATrie from pip._vendor.six import text_type from ._base import Trie as ABCTrie class Trie(ABCTrie): def __init__(self, data): chars = set() for key in data.keys(): if not isinstance(key, text_type): raise TypeError("All keys must be strings") for char in key: chars.add(char) self._data = DATrie("".join(chars)) for key, value in data.items(): self._data[key] = value def __contains__(self, key): return key in self._data def __len__(self): return len(self._data) def __iter__(self): raise NotImplementedError() def __getitem__(self, key): return self._data[key] def keys(self, prefix=None): return self._data.keys(prefix) def has_keys_with_prefix(self, prefix): return self._data.has_keys_with_prefix(prefix) def longest_prefix(self, prefix): 
return self._data.longest_prefix(prefix) def longest_prefix_item(self, prefix): return self._data.longest_prefix_item(prefix) PK.e[b\html5lib/_trie/__init__.pycnu[ abc@`sdddlmZmZmZddlmZeZyddlmZWne k rYnXeZdS(i(tabsolute_importtdivisiontunicode_literalsi(tTrieN( t __future__RRRtpyRtPyTrietdatrietDATriet ImportError(((sG/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/__init__.pyts PK.e[02 html5lib/_trie/datrie.pycnu[ abc@`sfddlmZmZmZddlmZddlmZddl mZ de fdYZdS(i(tabsolute_importtdivisiontunicode_literals(tTrie(t text_typeiRcB`s\eZdZdZdZdZdZd dZdZ dZ dZ RS( cC`st}xP|jD]B}t|ts:tdnx|D]}|j|qAWqWtdj||_x'|j D]\}}||j|sPK.e[Nhtml5lib/_trie/_base.pyonu[ abc@`sFddlmZmZmZddlmZdefdYZdS(i(tabsolute_importtdivisiontunicode_literals(tMappingtTriecB`s5eZdZddZdZdZdZRS(uAbstract base class for triescC`sWtt|j}|dkr+t|Stg|D]}|j|r5|^q5S(N(tsuperRtkeystNonetsett startswith(tselftprefixRtx((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pyR s  cC`s.x'|jD]}|j|r tSq WtS(N(RR tTruetFalse(R R tkey((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pythas_keys_with_prefixscC`s^||kr|Sx;tdt|dD] }|| |kr*|| Sq*Wt|dS(Ni(trangetlentKeyError(R R ti((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pytlongest_prefixs    cC`s|j|}|||fS(N(R(R R tlprefix((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pytlongest_prefix_item$sN(t__name__t __module__t__doc__RRRRR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pyRs   N(t __future__RRRt collectionsRR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pytsPK.e[ html5lib/_trie/py.pyonu[ abc@`sfddlmZmZmZddlmZddlmZddlm Z de fdYZ dS( i(tabsolute_importtdivisiontunicode_literals(t text_type(t bisect_lefti(tTrieRcB`sJeZdZdZdZdZdZddZdZ RS(cC`sktd|jDs+tdn||_t|j|_d|_dt|f|_dS(Ncs`s|]}t|tVqdS(N(t isinstanceR(t.0tx((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pys suAll keys must be stringsui( talltkeyst 
TypeErrort_datatsortedt_keyst _cachestrtlent _cachepoints(tselftdata((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyt__init__ s   cC`s ||jkS(N(R (Rtkey((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyt __contains__scC`s t|jS(N(RR (R((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyt__len__scC`s t|jS(N(titerR (R((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyt__iter__scC`s |j|S(N(R (RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyt __getitem__scC`s|dks"|dks"|j r/t|jS|j|jro|j\}}t|j|||}}nt|j|}}t}|t|jkr|Sx8|j|j|r|j|j||d7}qW||_||f|_|S(Nui( tNoneRtsett startswithRRRRtadd(RtprefixtlothitstarttiR ((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyR s"   cC`s||jkrtS|j|jrO|j\}}t|j|||}nt|j|}|t|jkrztS|j|j|S(N( R tTrueRRRRRRtFalse(RRR R!R#((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pythas_keys_with_prefix6sN( t__name__t __module__RRRRRRR R&(((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyR s     N( t __future__RRRtpip._vendor.sixRtbisectRt_baseRtABCTrie(((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pytsPK.e[̕>html5lib/_trie/_base.pynu[from __future__ import absolute_import, division, unicode_literals from collections import Mapping class Trie(Mapping): """Abstract base class for tries""" def keys(self, prefix=None): # pylint:disable=arguments-differ keys = super(Trie, self).keys() if prefix is None: return set(keys) # Python 2.6: no set comprehensions return set([x for x in keys if x.startswith(prefix)]) def has_keys_with_prefix(self, prefix): for key in self.keys(): if key.startswith(prefix): return True return False def longest_prefix(self, prefix): if prefix in self: return prefix for i in range(1, len(prefix) + 1): if prefix[:-i] in self: return prefix[:-i] raise KeyError(prefix) def longest_prefix_item(self, prefix): lprefix = self.longest_prefix(prefix) return (lprefix, self[lprefix]) 
PK.e[02 html5lib/_trie/datrie.pyonu[ abc@`sfddlmZmZmZddlmZddlmZddl mZ de fdYZdS(i(tabsolute_importtdivisiontunicode_literals(tTrie(t text_typeiRcB`s\eZdZdZdZdZdZd dZdZ dZ dZ RS( cC`st}xP|jD]B}t|ts:tdnx|D]}|j|qAWqWtdj||_x'|j D]\}}||j|sPK.e[KOnhtml5lib/_trie/py.pynu[from __future__ import absolute_import, division, unicode_literals from pip._vendor.six import text_type from bisect import bisect_left from ._base import Trie as ABCTrie class Trie(ABCTrie): def __init__(self, data): if not all(isinstance(x, text_type) for x in data.keys()): raise TypeError("All keys must be strings") self._data = data self._keys = sorted(data.keys()) self._cachestr = "" self._cachepoints = (0, len(data)) def __contains__(self, key): return key in self._data def __len__(self): return len(self._data) def __iter__(self): return iter(self._data) def __getitem__(self, key): return self._data[key] def keys(self, prefix=None): if prefix is None or prefix == "" or not self._keys: return set(self._keys) if prefix.startswith(self._cachestr): lo, hi = self._cachepoints start = i = bisect_left(self._keys, prefix, lo, hi) else: start = i = bisect_left(self._keys, prefix) keys = set() if start == len(self._keys): return keys while self._keys[i].startswith(prefix): keys.add(self._keys[i]) i += 1 self._cachestr = prefix self._cachepoints = (start, i) return keys def has_keys_with_prefix(self, prefix): if prefix in self._data: return True if prefix.startswith(self._cachestr): lo, hi = self._cachepoints i = bisect_left(self._keys, prefix, lo, hi) else: i = bisect_left(self._keys, prefix) if i == len(self._keys): return False return self._keys[i].startswith(prefix) PK.e[b\html5lib/_trie/__init__.pyonu[ abc@`sdddlmZmZmZddlmZeZyddlmZWne k rYnXeZdS(i(tabsolute_importtdivisiontunicode_literalsi(tTrieN( t __future__RRRtpyRtPyTrietdatrietDATriet ImportError(((sG/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/__init__.pyts PK.e[Nhtml5lib/_trie/_base.pycnu[ 
abc@`sFddlmZmZmZddlmZdefdYZdS(i(tabsolute_importtdivisiontunicode_literals(tMappingtTriecB`s5eZdZddZdZdZdZRS(uAbstract base class for triescC`sWtt|j}|dkr+t|Stg|D]}|j|r5|^q5S(N(tsuperRtkeystNonetsett startswith(tselftprefixRtx((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pyR s  cC`s.x'|jD]}|j|r tSq WtS(N(RR tTruetFalse(R R tkey((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pythas_keys_with_prefixscC`s^||kr|Sx;tdt|dD] }|| |kr*|| Sq*Wt|dS(Ni(trangetlentKeyError(R R ti((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pytlongest_prefixs    cC`s|j|}|||fS(N(R(R R tlprefix((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pytlongest_prefix_item$sN(t__name__t __module__t__doc__RRRRR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pyRs   N(t __future__RRRt collectionsRR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pytsPK.e[ html5lib/_trie/py.pycnu[ abc@`sfddlmZmZmZddlmZddlmZddlm Z de fdYZ dS( i(tabsolute_importtdivisiontunicode_literals(t text_type(t bisect_lefti(tTrieRcB`sJeZdZdZdZdZdZddZdZ RS(cC`sktd|jDs+tdn||_t|j|_d|_dt|f|_dS(Ncs`s|]}t|tVqdS(N(t isinstanceR(t.0tx((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pys suAll keys must be stringsui( talltkeyst TypeErrort_datatsortedt_keyst _cachestrtlent _cachepoints(tselftdata((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyt__init__ s   cC`s ||jkS(N(R (Rtkey((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyt __contains__scC`s t|jS(N(RR (R((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyt__len__scC`s t|jS(N(titerR (R((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyt__iter__scC`s |j|S(N(R (RR((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyt __getitem__scC`s|dks"|dks"|j r/t|jS|j|jro|j\}}t|j|||}}nt|j|}}t}|t|jkr|Sx8|j|j|r|j|j||d7}qW||_||f|_|S(Nui( tNoneRtsett 
startswithRRRRtadd(RtprefixtlothitstarttiR ((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyR s"   cC`s||jkrtS|j|jrO|j\}}t|j|||}nt|j|}|t|jkrztS|j|j|S(N( R tTrueRRRRRRtFalse(RRR R!R#((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pythas_keys_with_prefix6sN( t__name__t __module__RRRRRRR R&(((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyR s     N( t __future__RRRtpip._vendor.sixRtbisectRt_baseRtABCTrie(((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pytsPK.e[ӯ!!html5lib/_trie/__init__.pynu[from __future__ import absolute_import, division, unicode_literals from .py import Trie as PyTrie Trie = PyTrie # pylint:disable=wrong-import-position try: from .datrie import Trie as DATrie except ImportError: pass else: Trie = DATrie # pylint:enable=wrong-import-position PK.e[Ȥ}EEhtml5lib/constants.pynu[from __future__ import absolute_import, division, unicode_literals import string EOF = None E = { "null-character": "Null character in input stream, replaced with U+FFFD.", "invalid-codepoint": "Invalid codepoint in stream.", "incorrectly-placed-solidus": "Solidus (/) incorrectly placed in tag.", "incorrect-cr-newline-entity": "Incorrect CR newline entity, replaced with LF.", "illegal-windows-1252-entity": "Entity used with illegal number (windows-1252 reference).", "cant-convert-numeric-entity": "Numeric entity couldn't be converted to character " "(codepoint U+%(charAsInt)08x).", "illegal-codepoint-for-numeric-entity": "Numeric entity represents an illegal codepoint: " "U+%(charAsInt)08x.", "numeric-entity-without-semicolon": "Numeric entity didn't end with ';'.", "expected-numeric-entity-but-got-eof": "Numeric entity expected. Got end of file instead.", "expected-numeric-entity": "Numeric entity expected but none found.", "named-entity-without-semicolon": "Named entity didn't end with ';'.", "expected-named-entity": "Named entity expected. 
Got none.", "attributes-in-end-tag": "End tag contains unexpected attributes.", 'self-closing-flag-on-end-tag': "End tag contains unexpected self-closing flag.", "expected-tag-name-but-got-right-bracket": "Expected tag name. Got '>' instead.", "expected-tag-name-but-got-question-mark": "Expected tag name. Got '?' instead. (HTML doesn't " "support processing instructions.)", "expected-tag-name": "Expected tag name. Got something else instead", "expected-closing-tag-but-got-right-bracket": "Expected closing tag. Got '>' instead. Ignoring ''.", "expected-closing-tag-but-got-eof": "Expected closing tag. Unexpected end of file.", "expected-closing-tag-but-got-char": "Expected closing tag. Unexpected character '%(data)s' found.", "eof-in-tag-name": "Unexpected end of file in the tag name.", "expected-attribute-name-but-got-eof": "Unexpected end of file. Expected attribute name instead.", "eof-in-attribute-name": "Unexpected end of file in attribute name.", "invalid-character-in-attribute-name": "Invalid character in attribute name", "duplicate-attribute": "Dropped duplicate attribute on tag.", "expected-end-of-tag-name-but-got-eof": "Unexpected end of file. Expected = or end of tag.", "expected-attribute-value-but-got-eof": "Unexpected end of file. Expected attribute value.", "expected-attribute-value-but-got-right-bracket": "Expected attribute value. 
Got '>' instead.", 'equals-in-unquoted-attribute-value': "Unexpected = in unquoted attribute", 'unexpected-character-in-unquoted-attribute-value': "Unexpected character in unquoted attribute", "invalid-character-after-attribute-name": "Unexpected character after attribute name.", "unexpected-character-after-attribute-value": "Unexpected character after attribute value.", "eof-in-attribute-value-double-quote": "Unexpected end of file in attribute value (\").", "eof-in-attribute-value-single-quote": "Unexpected end of file in attribute value (').", "eof-in-attribute-value-no-quotes": "Unexpected end of file in attribute value.", "unexpected-EOF-after-solidus-in-tag": "Unexpected end of file in tag. Expected >", "unexpected-character-after-solidus-in-tag": "Unexpected character after / in tag. Expected >", "expected-dashes-or-doctype": "Expected '--' or 'DOCTYPE'. Not found.", "unexpected-bang-after-double-dash-in-comment": "Unexpected ! after -- in comment", "unexpected-space-after-double-dash-in-comment": "Unexpected space after -- in comment", "incorrect-comment": "Incorrect comment.", "eof-in-comment": "Unexpected end of file in comment.", "eof-in-comment-end-dash": "Unexpected end of file in comment (-)", "unexpected-dash-after-double-dash-in-comment": "Unexpected '-' after '--' found in comment.", "eof-in-comment-double-dash": "Unexpected end of file in comment (--).", "eof-in-comment-end-space-state": "Unexpected end of file in comment.", "eof-in-comment-end-bang-state": "Unexpected end of file in comment.", "unexpected-char-in-comment": "Unexpected character in comment found.", "need-space-after-doctype": "No space after literal string 'DOCTYPE'.", "expected-doctype-name-but-got-right-bracket": "Unexpected > character. Expected DOCTYPE name.", "expected-doctype-name-but-got-eof": "Unexpected end of file. 
Expected DOCTYPE name.", "eof-in-doctype-name": "Unexpected end of file in DOCTYPE name.", "eof-in-doctype": "Unexpected end of file in DOCTYPE.", "expected-space-or-right-bracket-in-doctype": "Expected space or '>'. Got '%(data)s'", "unexpected-end-of-doctype": "Unexpected end of DOCTYPE.", "unexpected-char-in-doctype": "Unexpected character in DOCTYPE.", "eof-in-innerhtml": "XXX innerHTML EOF", "unexpected-doctype": "Unexpected DOCTYPE. Ignored.", "non-html-root": "html needs to be the first start tag.", "expected-doctype-but-got-eof": "Unexpected End of file. Expected DOCTYPE.", "unknown-doctype": "Erroneous DOCTYPE.", "expected-doctype-but-got-chars": "Unexpected non-space characters. Expected DOCTYPE.", "expected-doctype-but-got-start-tag": "Unexpected start tag (%(name)s). Expected DOCTYPE.", "expected-doctype-but-got-end-tag": "Unexpected end tag (%(name)s). Expected DOCTYPE.", "end-tag-after-implied-root": "Unexpected end tag (%(name)s) after the (implied) root element.", "expected-named-closing-tag-but-got-eof": "Unexpected end of file. Expected end tag (%(name)s).", "two-heads-are-not-better-than-one": "Unexpected start tag head in existing head. Ignored.", "unexpected-end-tag": "Unexpected end tag (%(name)s). Ignored.", "unexpected-start-tag-out-of-my-head": "Unexpected start tag (%(name)s) that can be in head. Moved.", "unexpected-start-tag": "Unexpected start tag (%(name)s).", "missing-end-tag": "Missing end tag (%(name)s).", "missing-end-tags": "Missing end tags (%(name)s).", "unexpected-start-tag-implies-end-tag": "Unexpected start tag (%(startName)s) " "implies end tag (%(endName)s).", "unexpected-start-tag-treated-as": "Unexpected start tag (%(originalName)s). Treated as %(newName)s.", "deprecated-tag": "Unexpected start tag %(name)s. Don't use it!", "unexpected-start-tag-ignored": "Unexpected start tag %(name)s. Ignored.", "expected-one-end-tag-but-got-another": "Unexpected end tag (%(gotName)s). 
" "Missing end tag (%(expectedName)s).", "end-tag-too-early": "End tag (%(name)s) seen too early. Expected other end tag.", "end-tag-too-early-named": "Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).", "end-tag-too-early-ignored": "End tag (%(name)s) seen too early. Ignored.", "adoption-agency-1.1": "End tag (%(name)s) violates step 1, " "paragraph 1 of the adoption agency algorithm.", "adoption-agency-1.2": "End tag (%(name)s) violates step 1, " "paragraph 2 of the adoption agency algorithm.", "adoption-agency-1.3": "End tag (%(name)s) violates step 1, " "paragraph 3 of the adoption agency algorithm.", "adoption-agency-4.4": "End tag (%(name)s) violates step 4, " "paragraph 4 of the adoption agency algorithm.", "unexpected-end-tag-treated-as": "Unexpected end tag (%(originalName)s). Treated as %(newName)s.", "no-end-tag": "This element (%(name)s) has no end tag.", "unexpected-implied-end-tag-in-table": "Unexpected implied end tag (%(name)s) in the table phase.", "unexpected-implied-end-tag-in-table-body": "Unexpected implied end tag (%(name)s) in the table body phase.", "unexpected-char-implies-table-voodoo": "Unexpected non-space characters in " "table context caused voodoo mode.", "unexpected-hidden-input-in-table": "Unexpected input with type hidden in table context.", "unexpected-form-in-table": "Unexpected form in table context.", "unexpected-start-tag-implies-table-voodoo": "Unexpected start tag (%(name)s) in " "table context caused voodoo mode.", "unexpected-end-tag-implies-table-voodoo": "Unexpected end tag (%(name)s) in " "table context caused voodoo mode.", "unexpected-cell-in-table-body": "Unexpected table cell start tag (%(name)s) " "in the table body phase.", "unexpected-cell-end-tag": "Got table cell end tag (%(name)s) " "while required end tags are missing.", "unexpected-end-tag-in-table-body": "Unexpected end tag (%(name)s) in the table body phase. 
Ignored.", "unexpected-implied-end-tag-in-table-row": "Unexpected implied end tag (%(name)s) in the table row phase.", "unexpected-end-tag-in-table-row": "Unexpected end tag (%(name)s) in the table row phase. Ignored.", "unexpected-select-in-select": "Unexpected select start tag in the select phase " "treated as select end tag.", "unexpected-input-in-select": "Unexpected input start tag in the select phase.", "unexpected-start-tag-in-select": "Unexpected start tag token (%(name)s in the select phase. " "Ignored.", "unexpected-end-tag-in-select": "Unexpected end tag (%(name)s) in the select phase. Ignored.", "unexpected-table-element-start-tag-in-select-in-table": "Unexpected table element start tag (%(name)s) in the select in table phase.", "unexpected-table-element-end-tag-in-select-in-table": "Unexpected table element end tag (%(name)s) in the select in table phase.", "unexpected-char-after-body": "Unexpected non-space characters in the after body phase.", "unexpected-start-tag-after-body": "Unexpected start tag token (%(name)s)" " in the after body phase.", "unexpected-end-tag-after-body": "Unexpected end tag token (%(name)s)" " in the after body phase.", "unexpected-char-in-frameset": "Unexpected characters in the frameset phase. Characters ignored.", "unexpected-start-tag-in-frameset": "Unexpected start tag token (%(name)s)" " in the frameset phase. Ignored.", "unexpected-frameset-in-frameset-innerhtml": "Unexpected end tag token (frameset) " "in the frameset phase (innerHTML).", "unexpected-end-tag-in-frameset": "Unexpected end tag token (%(name)s)" " in the frameset phase. Ignored.", "unexpected-char-after-frameset": "Unexpected non-space characters in the " "after frameset phase. Ignored.", "unexpected-start-tag-after-frameset": "Unexpected start tag (%(name)s)" " in the after frameset phase. Ignored.", "unexpected-end-tag-after-frameset": "Unexpected end tag (%(name)s)" " in the after frameset phase. 
Ignored.", "unexpected-end-tag-after-body-innerhtml": "Unexpected end tag after body(innerHtml)", "expected-eof-but-got-char": "Unexpected non-space characters. Expected end of file.", "expected-eof-but-got-start-tag": "Unexpected start tag (%(name)s)" ". Expected end of file.", "expected-eof-but-got-end-tag": "Unexpected end tag (%(name)s)" ". Expected end of file.", "eof-in-table": "Unexpected end of file. Expected table content.", "eof-in-select": "Unexpected end of file. Expected select content.", "eof-in-frameset": "Unexpected end of file. Expected frameset content.", "eof-in-script-in-script": "Unexpected end of file. Expected script content.", "eof-in-foreign-lands": "Unexpected end of file. Expected foreign content", "non-void-element-with-trailing-solidus": "Trailing solidus not allowed on element %(name)s", "unexpected-html-element-in-foreign-content": "Element %(name)s not allowed in a non-html context", "unexpected-end-tag-before-html": "Unexpected end tag (%(name)s) before html.", "unexpected-inhead-noscript-tag": "Element %(name)s not allowed in a inhead-noscript context", "eof-in-head-noscript": "Unexpected end of file. Expected inhead-noscript content", "char-in-head-noscript": "Unexpected non-space character. 
Expected inhead-noscript content", "XXX-undefined-error": "Undefined error (this sucks and should be fixed)", } namespaces = { "html": "http://www.w3.org/1999/xhtml", "mathml": "http://www.w3.org/1998/Math/MathML", "svg": "http://www.w3.org/2000/svg", "xlink": "http://www.w3.org/1999/xlink", "xml": "http://www.w3.org/XML/1998/namespace", "xmlns": "http://www.w3.org/2000/xmlns/" } scopingElements = frozenset([ (namespaces["html"], "applet"), (namespaces["html"], "caption"), (namespaces["html"], "html"), (namespaces["html"], "marquee"), (namespaces["html"], "object"), (namespaces["html"], "table"), (namespaces["html"], "td"), (namespaces["html"], "th"), (namespaces["mathml"], "mi"), (namespaces["mathml"], "mo"), (namespaces["mathml"], "mn"), (namespaces["mathml"], "ms"), (namespaces["mathml"], "mtext"), (namespaces["mathml"], "annotation-xml"), (namespaces["svg"], "foreignObject"), (namespaces["svg"], "desc"), (namespaces["svg"], "title"), ]) formattingElements = frozenset([ (namespaces["html"], "a"), (namespaces["html"], "b"), (namespaces["html"], "big"), (namespaces["html"], "code"), (namespaces["html"], "em"), (namespaces["html"], "font"), (namespaces["html"], "i"), (namespaces["html"], "nobr"), (namespaces["html"], "s"), (namespaces["html"], "small"), (namespaces["html"], "strike"), (namespaces["html"], "strong"), (namespaces["html"], "tt"), (namespaces["html"], "u") ]) specialElements = frozenset([ (namespaces["html"], "address"), (namespaces["html"], "applet"), (namespaces["html"], "area"), (namespaces["html"], "article"), (namespaces["html"], "aside"), (namespaces["html"], "base"), (namespaces["html"], "basefont"), (namespaces["html"], "bgsound"), (namespaces["html"], "blockquote"), (namespaces["html"], "body"), (namespaces["html"], "br"), (namespaces["html"], "button"), (namespaces["html"], "caption"), (namespaces["html"], "center"), (namespaces["html"], "col"), (namespaces["html"], "colgroup"), (namespaces["html"], "command"), (namespaces["html"], "dd"), 
(namespaces["html"], "details"), (namespaces["html"], "dir"), (namespaces["html"], "div"), (namespaces["html"], "dl"), (namespaces["html"], "dt"), (namespaces["html"], "embed"), (namespaces["html"], "fieldset"), (namespaces["html"], "figure"), (namespaces["html"], "footer"), (namespaces["html"], "form"), (namespaces["html"], "frame"), (namespaces["html"], "frameset"), (namespaces["html"], "h1"), (namespaces["html"], "h2"), (namespaces["html"], "h3"), (namespaces["html"], "h4"), (namespaces["html"], "h5"), (namespaces["html"], "h6"), (namespaces["html"], "head"), (namespaces["html"], "header"), (namespaces["html"], "hr"), (namespaces["html"], "html"), (namespaces["html"], "iframe"), # Note that image is commented out in the spec as "this isn't an # element that can end up on the stack, so it doesn't matter," (namespaces["html"], "image"), (namespaces["html"], "img"), (namespaces["html"], "input"), (namespaces["html"], "isindex"), (namespaces["html"], "li"), (namespaces["html"], "link"), (namespaces["html"], "listing"), (namespaces["html"], "marquee"), (namespaces["html"], "menu"), (namespaces["html"], "meta"), (namespaces["html"], "nav"), (namespaces["html"], "noembed"), (namespaces["html"], "noframes"), (namespaces["html"], "noscript"), (namespaces["html"], "object"), (namespaces["html"], "ol"), (namespaces["html"], "p"), (namespaces["html"], "param"), (namespaces["html"], "plaintext"), (namespaces["html"], "pre"), (namespaces["html"], "script"), (namespaces["html"], "section"), (namespaces["html"], "select"), (namespaces["html"], "style"), (namespaces["html"], "table"), (namespaces["html"], "tbody"), (namespaces["html"], "td"), (namespaces["html"], "textarea"), (namespaces["html"], "tfoot"), (namespaces["html"], "th"), (namespaces["html"], "thead"), (namespaces["html"], "title"), (namespaces["html"], "tr"), (namespaces["html"], "ul"), (namespaces["html"], "wbr"), (namespaces["html"], "xmp"), (namespaces["svg"], "foreignObject") ]) htmlIntegrationPointElements = 
frozenset([ (namespaces["mathml"], "annotaion-xml"), (namespaces["svg"], "foreignObject"), (namespaces["svg"], "desc"), (namespaces["svg"], "title") ]) mathmlTextIntegrationPointElements = frozenset([ (namespaces["mathml"], "mi"), (namespaces["mathml"], "mo"), (namespaces["mathml"], "mn"), (namespaces["mathml"], "ms"), (namespaces["mathml"], "mtext") ]) adjustSVGAttributes = { "attributename": "attributeName", "attributetype": "attributeType", "basefrequency": "baseFrequency", "baseprofile": "baseProfile", "calcmode": "calcMode", "clippathunits": "clipPathUnits", "contentscripttype": "contentScriptType", "contentstyletype": "contentStyleType", "diffuseconstant": "diffuseConstant", "edgemode": "edgeMode", "externalresourcesrequired": "externalResourcesRequired", "filterres": "filterRes", "filterunits": "filterUnits", "glyphref": "glyphRef", "gradienttransform": "gradientTransform", "gradientunits": "gradientUnits", "kernelmatrix": "kernelMatrix", "kernelunitlength": "kernelUnitLength", "keypoints": "keyPoints", "keysplines": "keySplines", "keytimes": "keyTimes", "lengthadjust": "lengthAdjust", "limitingconeangle": "limitingConeAngle", "markerheight": "markerHeight", "markerunits": "markerUnits", "markerwidth": "markerWidth", "maskcontentunits": "maskContentUnits", "maskunits": "maskUnits", "numoctaves": "numOctaves", "pathlength": "pathLength", "patterncontentunits": "patternContentUnits", "patterntransform": "patternTransform", "patternunits": "patternUnits", "pointsatx": "pointsAtX", "pointsaty": "pointsAtY", "pointsatz": "pointsAtZ", "preservealpha": "preserveAlpha", "preserveaspectratio": "preserveAspectRatio", "primitiveunits": "primitiveUnits", "refx": "refX", "refy": "refY", "repeatcount": "repeatCount", "repeatdur": "repeatDur", "requiredextensions": "requiredExtensions", "requiredfeatures": "requiredFeatures", "specularconstant": "specularConstant", "specularexponent": "specularExponent", "spreadmethod": "spreadMethod", "startoffset": "startOffset", 
"stddeviation": "stdDeviation", "stitchtiles": "stitchTiles", "surfacescale": "surfaceScale", "systemlanguage": "systemLanguage", "tablevalues": "tableValues", "targetx": "targetX", "targety": "targetY", "textlength": "textLength", "viewbox": "viewBox", "viewtarget": "viewTarget", "xchannelselector": "xChannelSelector", "ychannelselector": "yChannelSelector", "zoomandpan": "zoomAndPan" } adjustMathMLAttributes = {"definitionurl": "definitionURL"} adjustForeignAttributes = { "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]), "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]), "xlink:href": ("xlink", "href", namespaces["xlink"]), "xlink:role": ("xlink", "role", namespaces["xlink"]), "xlink:show": ("xlink", "show", namespaces["xlink"]), "xlink:title": ("xlink", "title", namespaces["xlink"]), "xlink:type": ("xlink", "type", namespaces["xlink"]), "xml:base": ("xml", "base", namespaces["xml"]), "xml:lang": ("xml", "lang", namespaces["xml"]), "xml:space": ("xml", "space", namespaces["xml"]), "xmlns": (None, "xmlns", namespaces["xmlns"]), "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"]) } unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in adjustForeignAttributes.items()]) spaceCharacters = frozenset([ "\t", "\n", "\u000C", " ", "\r" ]) tableInsertModeElements = frozenset([ "table", "tbody", "tfoot", "thead", "tr" ]) asciiLowercase = frozenset(string.ascii_lowercase) asciiUppercase = frozenset(string.ascii_uppercase) asciiLetters = frozenset(string.ascii_letters) digits = frozenset(string.digits) hexDigits = frozenset(string.hexdigits) asciiUpper2Lower = dict([(ord(c), ord(c.lower())) for c in string.ascii_uppercase]) # Heading elements need to be ordered headingElements = ( "h1", "h2", "h3", "h4", "h5", "h6" ) voidElements = frozenset([ "base", "command", "event-source", "link", "meta", "hr", "br", "img", "embed", "param", "area", "col", "input", "source", "track" ]) cdataElements = frozenset(['title', 
'textarea']) rcdataElements = frozenset([ 'style', 'script', 'xmp', 'iframe', 'noembed', 'noframes', 'noscript' ]) booleanAttributes = { "": frozenset(["irrelevant"]), "style": frozenset(["scoped"]), "img": frozenset(["ismap"]), "audio": frozenset(["autoplay", "controls"]), "video": frozenset(["autoplay", "controls"]), "script": frozenset(["defer", "async"]), "details": frozenset(["open"]), "datagrid": frozenset(["multiple", "disabled"]), "command": frozenset(["hidden", "disabled", "checked", "default"]), "hr": frozenset(["noshade"]), "menu": frozenset(["autosubmit"]), "fieldset": frozenset(["disabled", "readonly"]), "option": frozenset(["disabled", "readonly", "selected"]), "optgroup": frozenset(["disabled", "readonly"]), "button": frozenset(["disabled", "autofocus"]), "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]), "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]), "output": frozenset(["disabled", "readonly"]), } # entitiesWindows1252 has to be _ordered_ and needs to have an index. It # therefore can't be a frozenset. 
entitiesWindows1252 = ( 8364, # 0x80 0x20AC EURO SIGN 65533, # 0x81 UNDEFINED 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS 8224, # 0x86 0x2020 DAGGER 8225, # 0x87 0x2021 DOUBLE DAGGER 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT 8240, # 0x89 0x2030 PER MILLE SIGN 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE 65533, # 0x8D UNDEFINED 381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON 65533, # 0x8F UNDEFINED 65533, # 0x90 UNDEFINED 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK 8226, # 0x95 0x2022 BULLET 8211, # 0x96 0x2013 EN DASH 8212, # 0x97 0x2014 EM DASH 732, # 0x98 0x02DC SMALL TILDE 8482, # 0x99 0x2122 TRADE MARK SIGN 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE 65533, # 0x9D UNDEFINED 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS ) xmlEntities = frozenset(['lt;', 'gt;', 'amp;', 'apos;', 'quot;']) entities = { "AElig": "\xc6", "AElig;": "\xc6", "AMP": "&", "AMP;": "&", "Aacute": "\xc1", "Aacute;": "\xc1", "Abreve;": "\u0102", "Acirc": "\xc2", "Acirc;": "\xc2", "Acy;": "\u0410", "Afr;": "\U0001d504", "Agrave": "\xc0", "Agrave;": "\xc0", "Alpha;": "\u0391", "Amacr;": "\u0100", "And;": "\u2a53", "Aogon;": "\u0104", "Aopf;": "\U0001d538", "ApplyFunction;": "\u2061", "Aring": "\xc5", "Aring;": "\xc5", "Ascr;": "\U0001d49c", "Assign;": "\u2254", "Atilde": "\xc3", "Atilde;": "\xc3", "Auml": "\xc4", "Auml;": "\xc4", "Backslash;": "\u2216", "Barv;": "\u2ae7", "Barwed;": "\u2306", "Bcy;": "\u0411", 
"Because;": "\u2235", "Bernoullis;": "\u212c", "Beta;": "\u0392", "Bfr;": "\U0001d505", "Bopf;": "\U0001d539", "Breve;": "\u02d8", "Bscr;": "\u212c", "Bumpeq;": "\u224e", "CHcy;": "\u0427", "COPY": "\xa9", "COPY;": "\xa9", "Cacute;": "\u0106", "Cap;": "\u22d2", "CapitalDifferentialD;": "\u2145", "Cayleys;": "\u212d", "Ccaron;": "\u010c", "Ccedil": "\xc7", "Ccedil;": "\xc7", "Ccirc;": "\u0108", "Cconint;": "\u2230", "Cdot;": "\u010a", "Cedilla;": "\xb8", "CenterDot;": "\xb7", "Cfr;": "\u212d", "Chi;": "\u03a7", "CircleDot;": "\u2299", "CircleMinus;": "\u2296", "CirclePlus;": "\u2295", "CircleTimes;": "\u2297", "ClockwiseContourIntegral;": "\u2232", "CloseCurlyDoubleQuote;": "\u201d", "CloseCurlyQuote;": "\u2019", "Colon;": "\u2237", "Colone;": "\u2a74", "Congruent;": "\u2261", "Conint;": "\u222f", "ContourIntegral;": "\u222e", "Copf;": "\u2102", "Coproduct;": "\u2210", "CounterClockwiseContourIntegral;": "\u2233", "Cross;": "\u2a2f", "Cscr;": "\U0001d49e", "Cup;": "\u22d3", "CupCap;": "\u224d", "DD;": "\u2145", "DDotrahd;": "\u2911", "DJcy;": "\u0402", "DScy;": "\u0405", "DZcy;": "\u040f", "Dagger;": "\u2021", "Darr;": "\u21a1", "Dashv;": "\u2ae4", "Dcaron;": "\u010e", "Dcy;": "\u0414", "Del;": "\u2207", "Delta;": "\u0394", "Dfr;": "\U0001d507", "DiacriticalAcute;": "\xb4", "DiacriticalDot;": "\u02d9", "DiacriticalDoubleAcute;": "\u02dd", "DiacriticalGrave;": "`", "DiacriticalTilde;": "\u02dc", "Diamond;": "\u22c4", "DifferentialD;": "\u2146", "Dopf;": "\U0001d53b", "Dot;": "\xa8", "DotDot;": "\u20dc", "DotEqual;": "\u2250", "DoubleContourIntegral;": "\u222f", "DoubleDot;": "\xa8", "DoubleDownArrow;": "\u21d3", "DoubleLeftArrow;": "\u21d0", "DoubleLeftRightArrow;": "\u21d4", "DoubleLeftTee;": "\u2ae4", "DoubleLongLeftArrow;": "\u27f8", "DoubleLongLeftRightArrow;": "\u27fa", "DoubleLongRightArrow;": "\u27f9", "DoubleRightArrow;": "\u21d2", "DoubleRightTee;": "\u22a8", "DoubleUpArrow;": "\u21d1", "DoubleUpDownArrow;": "\u21d5", "DoubleVerticalBar;": "\u2225", 
"DownArrow;": "\u2193", "DownArrowBar;": "\u2913", "DownArrowUpArrow;": "\u21f5", "DownBreve;": "\u0311", "DownLeftRightVector;": "\u2950", "DownLeftTeeVector;": "\u295e", "DownLeftVector;": "\u21bd", "DownLeftVectorBar;": "\u2956", "DownRightTeeVector;": "\u295f", "DownRightVector;": "\u21c1", "DownRightVectorBar;": "\u2957", "DownTee;": "\u22a4", "DownTeeArrow;": "\u21a7", "Downarrow;": "\u21d3", "Dscr;": "\U0001d49f", "Dstrok;": "\u0110", "ENG;": "\u014a", "ETH": "\xd0", "ETH;": "\xd0", "Eacute": "\xc9", "Eacute;": "\xc9", "Ecaron;": "\u011a", "Ecirc": "\xca", "Ecirc;": "\xca", "Ecy;": "\u042d", "Edot;": "\u0116", "Efr;": "\U0001d508", "Egrave": "\xc8", "Egrave;": "\xc8", "Element;": "\u2208", "Emacr;": "\u0112", "EmptySmallSquare;": "\u25fb", "EmptyVerySmallSquare;": "\u25ab", "Eogon;": "\u0118", "Eopf;": "\U0001d53c", "Epsilon;": "\u0395", "Equal;": "\u2a75", "EqualTilde;": "\u2242", "Equilibrium;": "\u21cc", "Escr;": "\u2130", "Esim;": "\u2a73", "Eta;": "\u0397", "Euml": "\xcb", "Euml;": "\xcb", "Exists;": "\u2203", "ExponentialE;": "\u2147", "Fcy;": "\u0424", "Ffr;": "\U0001d509", "FilledSmallSquare;": "\u25fc", "FilledVerySmallSquare;": "\u25aa", "Fopf;": "\U0001d53d", "ForAll;": "\u2200", "Fouriertrf;": "\u2131", "Fscr;": "\u2131", "GJcy;": "\u0403", "GT": ">", "GT;": ">", "Gamma;": "\u0393", "Gammad;": "\u03dc", "Gbreve;": "\u011e", "Gcedil;": "\u0122", "Gcirc;": "\u011c", "Gcy;": "\u0413", "Gdot;": "\u0120", "Gfr;": "\U0001d50a", "Gg;": "\u22d9", "Gopf;": "\U0001d53e", "GreaterEqual;": "\u2265", "GreaterEqualLess;": "\u22db", "GreaterFullEqual;": "\u2267", "GreaterGreater;": "\u2aa2", "GreaterLess;": "\u2277", "GreaterSlantEqual;": "\u2a7e", "GreaterTilde;": "\u2273", "Gscr;": "\U0001d4a2", "Gt;": "\u226b", "HARDcy;": "\u042a", "Hacek;": "\u02c7", "Hat;": "^", "Hcirc;": "\u0124", "Hfr;": "\u210c", "HilbertSpace;": "\u210b", "Hopf;": "\u210d", "HorizontalLine;": "\u2500", "Hscr;": "\u210b", "Hstrok;": "\u0126", "HumpDownHump;": "\u224e", "HumpEqual;": 
"\u224f", "IEcy;": "\u0415", "IJlig;": "\u0132", "IOcy;": "\u0401", "Iacute": "\xcd", "Iacute;": "\xcd", "Icirc": "\xce", "Icirc;": "\xce", "Icy;": "\u0418", "Idot;": "\u0130", "Ifr;": "\u2111", "Igrave": "\xcc", "Igrave;": "\xcc", "Im;": "\u2111", "Imacr;": "\u012a", "ImaginaryI;": "\u2148", "Implies;": "\u21d2", "Int;": "\u222c", "Integral;": "\u222b", "Intersection;": "\u22c2", "InvisibleComma;": "\u2063", "InvisibleTimes;": "\u2062", "Iogon;": "\u012e", "Iopf;": "\U0001d540", "Iota;": "\u0399", "Iscr;": "\u2110", "Itilde;": "\u0128", "Iukcy;": "\u0406", "Iuml": "\xcf", "Iuml;": "\xcf", "Jcirc;": "\u0134", "Jcy;": "\u0419", "Jfr;": "\U0001d50d", "Jopf;": "\U0001d541", "Jscr;": "\U0001d4a5", "Jsercy;": "\u0408", "Jukcy;": "\u0404", "KHcy;": "\u0425", "KJcy;": "\u040c", "Kappa;": "\u039a", "Kcedil;": "\u0136", "Kcy;": "\u041a", "Kfr;": "\U0001d50e", "Kopf;": "\U0001d542", "Kscr;": "\U0001d4a6", "LJcy;": "\u0409", "LT": "<", "LT;": "<", "Lacute;": "\u0139", "Lambda;": "\u039b", "Lang;": "\u27ea", "Laplacetrf;": "\u2112", "Larr;": "\u219e", "Lcaron;": "\u013d", "Lcedil;": "\u013b", "Lcy;": "\u041b", "LeftAngleBracket;": "\u27e8", "LeftArrow;": "\u2190", "LeftArrowBar;": "\u21e4", "LeftArrowRightArrow;": "\u21c6", "LeftCeiling;": "\u2308", "LeftDoubleBracket;": "\u27e6", "LeftDownTeeVector;": "\u2961", "LeftDownVector;": "\u21c3", "LeftDownVectorBar;": "\u2959", "LeftFloor;": "\u230a", "LeftRightArrow;": "\u2194", "LeftRightVector;": "\u294e", "LeftTee;": "\u22a3", "LeftTeeArrow;": "\u21a4", "LeftTeeVector;": "\u295a", "LeftTriangle;": "\u22b2", "LeftTriangleBar;": "\u29cf", "LeftTriangleEqual;": "\u22b4", "LeftUpDownVector;": "\u2951", "LeftUpTeeVector;": "\u2960", "LeftUpVector;": "\u21bf", "LeftUpVectorBar;": "\u2958", "LeftVector;": "\u21bc", "LeftVectorBar;": "\u2952", "Leftarrow;": "\u21d0", "Leftrightarrow;": "\u21d4", "LessEqualGreater;": "\u22da", "LessFullEqual;": "\u2266", "LessGreater;": "\u2276", "LessLess;": "\u2aa1", "LessSlantEqual;": "\u2a7d", 
"LessTilde;": "\u2272", "Lfr;": "\U0001d50f", "Ll;": "\u22d8", "Lleftarrow;": "\u21da", "Lmidot;": "\u013f", "LongLeftArrow;": "\u27f5", "LongLeftRightArrow;": "\u27f7", "LongRightArrow;": "\u27f6", "Longleftarrow;": "\u27f8", "Longleftrightarrow;": "\u27fa", "Longrightarrow;": "\u27f9", "Lopf;": "\U0001d543", "LowerLeftArrow;": "\u2199", "LowerRightArrow;": "\u2198", "Lscr;": "\u2112", "Lsh;": "\u21b0", "Lstrok;": "\u0141", "Lt;": "\u226a", "Map;": "\u2905", "Mcy;": "\u041c", "MediumSpace;": "\u205f", "Mellintrf;": "\u2133", "Mfr;": "\U0001d510", "MinusPlus;": "\u2213", "Mopf;": "\U0001d544", "Mscr;": "\u2133", "Mu;": "\u039c", "NJcy;": "\u040a", "Nacute;": "\u0143", "Ncaron;": "\u0147", "Ncedil;": "\u0145", "Ncy;": "\u041d", "NegativeMediumSpace;": "\u200b", "NegativeThickSpace;": "\u200b", "NegativeThinSpace;": "\u200b", "NegativeVeryThinSpace;": "\u200b", "NestedGreaterGreater;": "\u226b", "NestedLessLess;": "\u226a", "NewLine;": "\n", "Nfr;": "\U0001d511", "NoBreak;": "\u2060", "NonBreakingSpace;": "\xa0", "Nopf;": "\u2115", "Not;": "\u2aec", "NotCongruent;": "\u2262", "NotCupCap;": "\u226d", "NotDoubleVerticalBar;": "\u2226", "NotElement;": "\u2209", "NotEqual;": "\u2260", "NotEqualTilde;": "\u2242\u0338", "NotExists;": "\u2204", "NotGreater;": "\u226f", "NotGreaterEqual;": "\u2271", "NotGreaterFullEqual;": "\u2267\u0338", "NotGreaterGreater;": "\u226b\u0338", "NotGreaterLess;": "\u2279", "NotGreaterSlantEqual;": "\u2a7e\u0338", "NotGreaterTilde;": "\u2275", "NotHumpDownHump;": "\u224e\u0338", "NotHumpEqual;": "\u224f\u0338", "NotLeftTriangle;": "\u22ea", "NotLeftTriangleBar;": "\u29cf\u0338", "NotLeftTriangleEqual;": "\u22ec", "NotLess;": "\u226e", "NotLessEqual;": "\u2270", "NotLessGreater;": "\u2278", "NotLessLess;": "\u226a\u0338", "NotLessSlantEqual;": "\u2a7d\u0338", "NotLessTilde;": "\u2274", "NotNestedGreaterGreater;": "\u2aa2\u0338", "NotNestedLessLess;": "\u2aa1\u0338", "NotPrecedes;": "\u2280", "NotPrecedesEqual;": "\u2aaf\u0338", 
"NotPrecedesSlantEqual;": "\u22e0", "NotReverseElement;": "\u220c", "NotRightTriangle;": "\u22eb", "NotRightTriangleBar;": "\u29d0\u0338", "NotRightTriangleEqual;": "\u22ed", "NotSquareSubset;": "\u228f\u0338", "NotSquareSubsetEqual;": "\u22e2", "NotSquareSuperset;": "\u2290\u0338", "NotSquareSupersetEqual;": "\u22e3", "NotSubset;": "\u2282\u20d2", "NotSubsetEqual;": "\u2288", "NotSucceeds;": "\u2281", "NotSucceedsEqual;": "\u2ab0\u0338", "NotSucceedsSlantEqual;": "\u22e1", "NotSucceedsTilde;": "\u227f\u0338", "NotSuperset;": "\u2283\u20d2", "NotSupersetEqual;": "\u2289", "NotTilde;": "\u2241", "NotTildeEqual;": "\u2244", "NotTildeFullEqual;": "\u2247", "NotTildeTilde;": "\u2249", "NotVerticalBar;": "\u2224", "Nscr;": "\U0001d4a9", "Ntilde": "\xd1", "Ntilde;": "\xd1", "Nu;": "\u039d", "OElig;": "\u0152", "Oacute": "\xd3", "Oacute;": "\xd3", "Ocirc": "\xd4", "Ocirc;": "\xd4", "Ocy;": "\u041e", "Odblac;": "\u0150", "Ofr;": "\U0001d512", "Ograve": "\xd2", "Ograve;": "\xd2", "Omacr;": "\u014c", "Omega;": "\u03a9", "Omicron;": "\u039f", "Oopf;": "\U0001d546", "OpenCurlyDoubleQuote;": "\u201c", "OpenCurlyQuote;": "\u2018", "Or;": "\u2a54", "Oscr;": "\U0001d4aa", "Oslash": "\xd8", "Oslash;": "\xd8", "Otilde": "\xd5", "Otilde;": "\xd5", "Otimes;": "\u2a37", "Ouml": "\xd6", "Ouml;": "\xd6", "OverBar;": "\u203e", "OverBrace;": "\u23de", "OverBracket;": "\u23b4", "OverParenthesis;": "\u23dc", "PartialD;": "\u2202", "Pcy;": "\u041f", "Pfr;": "\U0001d513", "Phi;": "\u03a6", "Pi;": "\u03a0", "PlusMinus;": "\xb1", "Poincareplane;": "\u210c", "Popf;": "\u2119", "Pr;": "\u2abb", "Precedes;": "\u227a", "PrecedesEqual;": "\u2aaf", "PrecedesSlantEqual;": "\u227c", "PrecedesTilde;": "\u227e", "Prime;": "\u2033", "Product;": "\u220f", "Proportion;": "\u2237", "Proportional;": "\u221d", "Pscr;": "\U0001d4ab", "Psi;": "\u03a8", "QUOT": "\"", "QUOT;": "\"", "Qfr;": "\U0001d514", "Qopf;": "\u211a", "Qscr;": "\U0001d4ac", "RBarr;": "\u2910", "REG": "\xae", "REG;": "\xae", "Racute;": 
"\u0154", "Rang;": "\u27eb", "Rarr;": "\u21a0", "Rarrtl;": "\u2916", "Rcaron;": "\u0158", "Rcedil;": "\u0156", "Rcy;": "\u0420", "Re;": "\u211c", "ReverseElement;": "\u220b", "ReverseEquilibrium;": "\u21cb", "ReverseUpEquilibrium;": "\u296f", "Rfr;": "\u211c", "Rho;": "\u03a1", "RightAngleBracket;": "\u27e9", "RightArrow;": "\u2192", "RightArrowBar;": "\u21e5", "RightArrowLeftArrow;": "\u21c4", "RightCeiling;": "\u2309", "RightDoubleBracket;": "\u27e7", "RightDownTeeVector;": "\u295d", "RightDownVector;": "\u21c2", "RightDownVectorBar;": "\u2955", "RightFloor;": "\u230b", "RightTee;": "\u22a2", "RightTeeArrow;": "\u21a6", "RightTeeVector;": "\u295b", "RightTriangle;": "\u22b3", "RightTriangleBar;": "\u29d0", "RightTriangleEqual;": "\u22b5", "RightUpDownVector;": "\u294f", "RightUpTeeVector;": "\u295c", "RightUpVector;": "\u21be", "RightUpVectorBar;": "\u2954", "RightVector;": "\u21c0", "RightVectorBar;": "\u2953", "Rightarrow;": "\u21d2", "Ropf;": "\u211d", "RoundImplies;": "\u2970", "Rrightarrow;": "\u21db", "Rscr;": "\u211b", "Rsh;": "\u21b1", "RuleDelayed;": "\u29f4", "SHCHcy;": "\u0429", "SHcy;": "\u0428", "SOFTcy;": "\u042c", "Sacute;": "\u015a", "Sc;": "\u2abc", "Scaron;": "\u0160", "Scedil;": "\u015e", "Scirc;": "\u015c", "Scy;": "\u0421", "Sfr;": "\U0001d516", "ShortDownArrow;": "\u2193", "ShortLeftArrow;": "\u2190", "ShortRightArrow;": "\u2192", "ShortUpArrow;": "\u2191", "Sigma;": "\u03a3", "SmallCircle;": "\u2218", "Sopf;": "\U0001d54a", "Sqrt;": "\u221a", "Square;": "\u25a1", "SquareIntersection;": "\u2293", "SquareSubset;": "\u228f", "SquareSubsetEqual;": "\u2291", "SquareSuperset;": "\u2290", "SquareSupersetEqual;": "\u2292", "SquareUnion;": "\u2294", "Sscr;": "\U0001d4ae", "Star;": "\u22c6", "Sub;": "\u22d0", "Subset;": "\u22d0", "SubsetEqual;": "\u2286", "Succeeds;": "\u227b", "SucceedsEqual;": "\u2ab0", "SucceedsSlantEqual;": "\u227d", "SucceedsTilde;": "\u227f", "SuchThat;": "\u220b", "Sum;": "\u2211", "Sup;": "\u22d1", "Superset;": "\u2283", 
"SupersetEqual;": "\u2287", "Supset;": "\u22d1", "THORN": "\xde", "THORN;": "\xde", "TRADE;": "\u2122", "TSHcy;": "\u040b", "TScy;": "\u0426", "Tab;": "\t", "Tau;": "\u03a4", "Tcaron;": "\u0164", "Tcedil;": "\u0162", "Tcy;": "\u0422", "Tfr;": "\U0001d517", "Therefore;": "\u2234", "Theta;": "\u0398", "ThickSpace;": "\u205f\u200a", "ThinSpace;": "\u2009", "Tilde;": "\u223c", "TildeEqual;": "\u2243", "TildeFullEqual;": "\u2245", "TildeTilde;": "\u2248", "Topf;": "\U0001d54b", "TripleDot;": "\u20db", "Tscr;": "\U0001d4af", "Tstrok;": "\u0166", "Uacute": "\xda", "Uacute;": "\xda", "Uarr;": "\u219f", "Uarrocir;": "\u2949", "Ubrcy;": "\u040e", "Ubreve;": "\u016c", "Ucirc": "\xdb", "Ucirc;": "\xdb", "Ucy;": "\u0423", "Udblac;": "\u0170", "Ufr;": "\U0001d518", "Ugrave": "\xd9", "Ugrave;": "\xd9", "Umacr;": "\u016a", "UnderBar;": "_", "UnderBrace;": "\u23df", "UnderBracket;": "\u23b5", "UnderParenthesis;": "\u23dd", "Union;": "\u22c3", "UnionPlus;": "\u228e", "Uogon;": "\u0172", "Uopf;": "\U0001d54c", "UpArrow;": "\u2191", "UpArrowBar;": "\u2912", "UpArrowDownArrow;": "\u21c5", "UpDownArrow;": "\u2195", "UpEquilibrium;": "\u296e", "UpTee;": "\u22a5", "UpTeeArrow;": "\u21a5", "Uparrow;": "\u21d1", "Updownarrow;": "\u21d5", "UpperLeftArrow;": "\u2196", "UpperRightArrow;": "\u2197", "Upsi;": "\u03d2", "Upsilon;": "\u03a5", "Uring;": "\u016e", "Uscr;": "\U0001d4b0", "Utilde;": "\u0168", "Uuml": "\xdc", "Uuml;": "\xdc", "VDash;": "\u22ab", "Vbar;": "\u2aeb", "Vcy;": "\u0412", "Vdash;": "\u22a9", "Vdashl;": "\u2ae6", "Vee;": "\u22c1", "Verbar;": "\u2016", "Vert;": "\u2016", "VerticalBar;": "\u2223", "VerticalLine;": "|", "VerticalSeparator;": "\u2758", "VerticalTilde;": "\u2240", "VeryThinSpace;": "\u200a", "Vfr;": "\U0001d519", "Vopf;": "\U0001d54d", "Vscr;": "\U0001d4b1", "Vvdash;": "\u22aa", "Wcirc;": "\u0174", "Wedge;": "\u22c0", "Wfr;": "\U0001d51a", "Wopf;": "\U0001d54e", "Wscr;": "\U0001d4b2", "Xfr;": "\U0001d51b", "Xi;": "\u039e", "Xopf;": "\U0001d54f", "Xscr;": 
"\U0001d4b3", "YAcy;": "\u042f", "YIcy;": "\u0407", "YUcy;": "\u042e", "Yacute": "\xdd", "Yacute;": "\xdd", "Ycirc;": "\u0176", "Ycy;": "\u042b", "Yfr;": "\U0001d51c", "Yopf;": "\U0001d550", "Yscr;": "\U0001d4b4", "Yuml;": "\u0178", "ZHcy;": "\u0416", "Zacute;": "\u0179", "Zcaron;": "\u017d", "Zcy;": "\u0417", "Zdot;": "\u017b", "ZeroWidthSpace;": "\u200b", "Zeta;": "\u0396", "Zfr;": "\u2128", "Zopf;": "\u2124", "Zscr;": "\U0001d4b5", "aacute": "\xe1", "aacute;": "\xe1", "abreve;": "\u0103", "ac;": "\u223e", "acE;": "\u223e\u0333", "acd;": "\u223f", "acirc": "\xe2", "acirc;": "\xe2", "acute": "\xb4", "acute;": "\xb4", "acy;": "\u0430", "aelig": "\xe6", "aelig;": "\xe6", "af;": "\u2061", "afr;": "\U0001d51e", "agrave": "\xe0", "agrave;": "\xe0", "alefsym;": "\u2135", "aleph;": "\u2135", "alpha;": "\u03b1", "amacr;": "\u0101", "amalg;": "\u2a3f", "amp": "&", "amp;": "&", "and;": "\u2227", "andand;": "\u2a55", "andd;": "\u2a5c", "andslope;": "\u2a58", "andv;": "\u2a5a", "ang;": "\u2220", "ange;": "\u29a4", "angle;": "\u2220", "angmsd;": "\u2221", "angmsdaa;": "\u29a8", "angmsdab;": "\u29a9", "angmsdac;": "\u29aa", "angmsdad;": "\u29ab", "angmsdae;": "\u29ac", "angmsdaf;": "\u29ad", "angmsdag;": "\u29ae", "angmsdah;": "\u29af", "angrt;": "\u221f", "angrtvb;": "\u22be", "angrtvbd;": "\u299d", "angsph;": "\u2222", "angst;": "\xc5", "angzarr;": "\u237c", "aogon;": "\u0105", "aopf;": "\U0001d552", "ap;": "\u2248", "apE;": "\u2a70", "apacir;": "\u2a6f", "ape;": "\u224a", "apid;": "\u224b", "apos;": "'", "approx;": "\u2248", "approxeq;": "\u224a", "aring": "\xe5", "aring;": "\xe5", "ascr;": "\U0001d4b6", "ast;": "*", "asymp;": "\u2248", "asympeq;": "\u224d", "atilde": "\xe3", "atilde;": "\xe3", "auml": "\xe4", "auml;": "\xe4", "awconint;": "\u2233", "awint;": "\u2a11", "bNot;": "\u2aed", "backcong;": "\u224c", "backepsilon;": "\u03f6", "backprime;": "\u2035", "backsim;": "\u223d", "backsimeq;": "\u22cd", "barvee;": "\u22bd", "barwed;": "\u2305", "barwedge;": "\u2305", 
"bbrk;": "\u23b5", "bbrktbrk;": "\u23b6", "bcong;": "\u224c", "bcy;": "\u0431", "bdquo;": "\u201e", "becaus;": "\u2235", "because;": "\u2235", "bemptyv;": "\u29b0", "bepsi;": "\u03f6", "bernou;": "\u212c", "beta;": "\u03b2", "beth;": "\u2136", "between;": "\u226c", "bfr;": "\U0001d51f", "bigcap;": "\u22c2", "bigcirc;": "\u25ef", "bigcup;": "\u22c3", "bigodot;": "\u2a00", "bigoplus;": "\u2a01", "bigotimes;": "\u2a02", "bigsqcup;": "\u2a06", "bigstar;": "\u2605", "bigtriangledown;": "\u25bd", "bigtriangleup;": "\u25b3", "biguplus;": "\u2a04", "bigvee;": "\u22c1", "bigwedge;": "\u22c0", "bkarow;": "\u290d", "blacklozenge;": "\u29eb", "blacksquare;": "\u25aa", "blacktriangle;": "\u25b4", "blacktriangledown;": "\u25be", "blacktriangleleft;": "\u25c2", "blacktriangleright;": "\u25b8", "blank;": "\u2423", "blk12;": "\u2592", "blk14;": "\u2591", "blk34;": "\u2593", "block;": "\u2588", "bne;": "=\u20e5", "bnequiv;": "\u2261\u20e5", "bnot;": "\u2310", "bopf;": "\U0001d553", "bot;": "\u22a5", "bottom;": "\u22a5", "bowtie;": "\u22c8", "boxDL;": "\u2557", "boxDR;": "\u2554", "boxDl;": "\u2556", "boxDr;": "\u2553", "boxH;": "\u2550", "boxHD;": "\u2566", "boxHU;": "\u2569", "boxHd;": "\u2564", "boxHu;": "\u2567", "boxUL;": "\u255d", "boxUR;": "\u255a", "boxUl;": "\u255c", "boxUr;": "\u2559", "boxV;": "\u2551", "boxVH;": "\u256c", "boxVL;": "\u2563", "boxVR;": "\u2560", "boxVh;": "\u256b", "boxVl;": "\u2562", "boxVr;": "\u255f", "boxbox;": "\u29c9", "boxdL;": "\u2555", "boxdR;": "\u2552", "boxdl;": "\u2510", "boxdr;": "\u250c", "boxh;": "\u2500", "boxhD;": "\u2565", "boxhU;": "\u2568", "boxhd;": "\u252c", "boxhu;": "\u2534", "boxminus;": "\u229f", "boxplus;": "\u229e", "boxtimes;": "\u22a0", "boxuL;": "\u255b", "boxuR;": "\u2558", "boxul;": "\u2518", "boxur;": "\u2514", "boxv;": "\u2502", "boxvH;": "\u256a", "boxvL;": "\u2561", "boxvR;": "\u255e", "boxvh;": "\u253c", "boxvl;": "\u2524", "boxvr;": "\u251c", "bprime;": "\u2035", "breve;": "\u02d8", "brvbar": "\xa6", "brvbar;": 
"\xa6", "bscr;": "\U0001d4b7", "bsemi;": "\u204f", "bsim;": "\u223d", "bsime;": "\u22cd", "bsol;": "\\", "bsolb;": "\u29c5", "bsolhsub;": "\u27c8", "bull;": "\u2022", "bullet;": "\u2022", "bump;": "\u224e", "bumpE;": "\u2aae", "bumpe;": "\u224f", "bumpeq;": "\u224f", "cacute;": "\u0107", "cap;": "\u2229", "capand;": "\u2a44", "capbrcup;": "\u2a49", "capcap;": "\u2a4b", "capcup;": "\u2a47", "capdot;": "\u2a40", "caps;": "\u2229\ufe00", "caret;": "\u2041", "caron;": "\u02c7", "ccaps;": "\u2a4d", "ccaron;": "\u010d", "ccedil": "\xe7", "ccedil;": "\xe7", "ccirc;": "\u0109", "ccups;": "\u2a4c", "ccupssm;": "\u2a50", "cdot;": "\u010b", "cedil": "\xb8", "cedil;": "\xb8", "cemptyv;": "\u29b2", "cent": "\xa2", "cent;": "\xa2", "centerdot;": "\xb7", "cfr;": "\U0001d520", "chcy;": "\u0447", "check;": "\u2713", "checkmark;": "\u2713", "chi;": "\u03c7", "cir;": "\u25cb", "cirE;": "\u29c3", "circ;": "\u02c6", "circeq;": "\u2257", "circlearrowleft;": "\u21ba", "circlearrowright;": "\u21bb", "circledR;": "\xae", "circledS;": "\u24c8", "circledast;": "\u229b", "circledcirc;": "\u229a", "circleddash;": "\u229d", "cire;": "\u2257", "cirfnint;": "\u2a10", "cirmid;": "\u2aef", "cirscir;": "\u29c2", "clubs;": "\u2663", "clubsuit;": "\u2663", "colon;": ":", "colone;": "\u2254", "coloneq;": "\u2254", "comma;": ",", "commat;": "@", "comp;": "\u2201", "compfn;": "\u2218", "complement;": "\u2201", "complexes;": "\u2102", "cong;": "\u2245", "congdot;": "\u2a6d", "conint;": "\u222e", "copf;": "\U0001d554", "coprod;": "\u2210", "copy": "\xa9", "copy;": "\xa9", "copysr;": "\u2117", "crarr;": "\u21b5", "cross;": "\u2717", "cscr;": "\U0001d4b8", "csub;": "\u2acf", "csube;": "\u2ad1", "csup;": "\u2ad0", "csupe;": "\u2ad2", "ctdot;": "\u22ef", "cudarrl;": "\u2938", "cudarrr;": "\u2935", "cuepr;": "\u22de", "cuesc;": "\u22df", "cularr;": "\u21b6", "cularrp;": "\u293d", "cup;": "\u222a", "cupbrcap;": "\u2a48", "cupcap;": "\u2a46", "cupcup;": "\u2a4a", "cupdot;": "\u228d", "cupor;": "\u2a45", "cups;": 
"\u222a\ufe00", "curarr;": "\u21b7", "curarrm;": "\u293c", "curlyeqprec;": "\u22de", "curlyeqsucc;": "\u22df", "curlyvee;": "\u22ce", "curlywedge;": "\u22cf", "curren": "\xa4", "curren;": "\xa4", "curvearrowleft;": "\u21b6", "curvearrowright;": "\u21b7", "cuvee;": "\u22ce", "cuwed;": "\u22cf", "cwconint;": "\u2232", "cwint;": "\u2231", "cylcty;": "\u232d", "dArr;": "\u21d3", "dHar;": "\u2965", "dagger;": "\u2020", "daleth;": "\u2138", "darr;": "\u2193", "dash;": "\u2010", "dashv;": "\u22a3", "dbkarow;": "\u290f", "dblac;": "\u02dd", "dcaron;": "\u010f", "dcy;": "\u0434", "dd;": "\u2146", "ddagger;": "\u2021", "ddarr;": "\u21ca", "ddotseq;": "\u2a77", "deg": "\xb0", "deg;": "\xb0", "delta;": "\u03b4", "demptyv;": "\u29b1", "dfisht;": "\u297f", "dfr;": "\U0001d521", "dharl;": "\u21c3", "dharr;": "\u21c2", "diam;": "\u22c4", "diamond;": "\u22c4", "diamondsuit;": "\u2666", "diams;": "\u2666", "die;": "\xa8", "digamma;": "\u03dd", "disin;": "\u22f2", "div;": "\xf7", "divide": "\xf7", "divide;": "\xf7", "divideontimes;": "\u22c7", "divonx;": "\u22c7", "djcy;": "\u0452", "dlcorn;": "\u231e", "dlcrop;": "\u230d", "dollar;": "$", "dopf;": "\U0001d555", "dot;": "\u02d9", "doteq;": "\u2250", "doteqdot;": "\u2251", "dotminus;": "\u2238", "dotplus;": "\u2214", "dotsquare;": "\u22a1", "doublebarwedge;": "\u2306", "downarrow;": "\u2193", "downdownarrows;": "\u21ca", "downharpoonleft;": "\u21c3", "downharpoonright;": "\u21c2", "drbkarow;": "\u2910", "drcorn;": "\u231f", "drcrop;": "\u230c", "dscr;": "\U0001d4b9", "dscy;": "\u0455", "dsol;": "\u29f6", "dstrok;": "\u0111", "dtdot;": "\u22f1", "dtri;": "\u25bf", "dtrif;": "\u25be", "duarr;": "\u21f5", "duhar;": "\u296f", "dwangle;": "\u29a6", "dzcy;": "\u045f", "dzigrarr;": "\u27ff", "eDDot;": "\u2a77", "eDot;": "\u2251", "eacute": "\xe9", "eacute;": "\xe9", "easter;": "\u2a6e", "ecaron;": "\u011b", "ecir;": "\u2256", "ecirc": "\xea", "ecirc;": "\xea", "ecolon;": "\u2255", "ecy;": "\u044d", "edot;": "\u0117", "ee;": "\u2147", 
"efDot;": "\u2252", "efr;": "\U0001d522", "eg;": "\u2a9a", "egrave": "\xe8", "egrave;": "\xe8", "egs;": "\u2a96", "egsdot;": "\u2a98", "el;": "\u2a99", "elinters;": "\u23e7", "ell;": "\u2113", "els;": "\u2a95", "elsdot;": "\u2a97", "emacr;": "\u0113", "empty;": "\u2205", "emptyset;": "\u2205", "emptyv;": "\u2205", "emsp13;": "\u2004", "emsp14;": "\u2005", "emsp;": "\u2003", "eng;": "\u014b", "ensp;": "\u2002", "eogon;": "\u0119", "eopf;": "\U0001d556", "epar;": "\u22d5", "eparsl;": "\u29e3", "eplus;": "\u2a71", "epsi;": "\u03b5", "epsilon;": "\u03b5", "epsiv;": "\u03f5", "eqcirc;": "\u2256", "eqcolon;": "\u2255", "eqsim;": "\u2242", "eqslantgtr;": "\u2a96", "eqslantless;": "\u2a95", "equals;": "=", "equest;": "\u225f", "equiv;": "\u2261", "equivDD;": "\u2a78", "eqvparsl;": "\u29e5", "erDot;": "\u2253", "erarr;": "\u2971", "escr;": "\u212f", "esdot;": "\u2250", "esim;": "\u2242", "eta;": "\u03b7", "eth": "\xf0", "eth;": "\xf0", "euml": "\xeb", "euml;": "\xeb", "euro;": "\u20ac", "excl;": "!", "exist;": "\u2203", "expectation;": "\u2130", "exponentiale;": "\u2147", "fallingdotseq;": "\u2252", "fcy;": "\u0444", "female;": "\u2640", "ffilig;": "\ufb03", "fflig;": "\ufb00", "ffllig;": "\ufb04", "ffr;": "\U0001d523", "filig;": "\ufb01", "fjlig;": "fj", "flat;": "\u266d", "fllig;": "\ufb02", "fltns;": "\u25b1", "fnof;": "\u0192", "fopf;": "\U0001d557", "forall;": "\u2200", "fork;": "\u22d4", "forkv;": "\u2ad9", "fpartint;": "\u2a0d", "frac12": "\xbd", "frac12;": "\xbd", "frac13;": "\u2153", "frac14": "\xbc", "frac14;": "\xbc", "frac15;": "\u2155", "frac16;": "\u2159", "frac18;": "\u215b", "frac23;": "\u2154", "frac25;": "\u2156", "frac34": "\xbe", "frac34;": "\xbe", "frac35;": "\u2157", "frac38;": "\u215c", "frac45;": "\u2158", "frac56;": "\u215a", "frac58;": "\u215d", "frac78;": "\u215e", "frasl;": "\u2044", "frown;": "\u2322", "fscr;": "\U0001d4bb", "gE;": "\u2267", "gEl;": "\u2a8c", "gacute;": "\u01f5", "gamma;": "\u03b3", "gammad;": "\u03dd", "gap;": "\u2a86", 
"gbreve;": "\u011f", "gcirc;": "\u011d", "gcy;": "\u0433", "gdot;": "\u0121", "ge;": "\u2265", "gel;": "\u22db", "geq;": "\u2265", "geqq;": "\u2267", "geqslant;": "\u2a7e", "ges;": "\u2a7e", "gescc;": "\u2aa9", "gesdot;": "\u2a80", "gesdoto;": "\u2a82", "gesdotol;": "\u2a84", "gesl;": "\u22db\ufe00", "gesles;": "\u2a94", "gfr;": "\U0001d524", "gg;": "\u226b", "ggg;": "\u22d9", "gimel;": "\u2137", "gjcy;": "\u0453", "gl;": "\u2277", "glE;": "\u2a92", "gla;": "\u2aa5", "glj;": "\u2aa4", "gnE;": "\u2269", "gnap;": "\u2a8a", "gnapprox;": "\u2a8a", "gne;": "\u2a88", "gneq;": "\u2a88", "gneqq;": "\u2269", "gnsim;": "\u22e7", "gopf;": "\U0001d558", "grave;": "`", "gscr;": "\u210a", "gsim;": "\u2273", "gsime;": "\u2a8e", "gsiml;": "\u2a90", "gt": ">", "gt;": ">", "gtcc;": "\u2aa7", "gtcir;": "\u2a7a", "gtdot;": "\u22d7", "gtlPar;": "\u2995", "gtquest;": "\u2a7c", "gtrapprox;": "\u2a86", "gtrarr;": "\u2978", "gtrdot;": "\u22d7", "gtreqless;": "\u22db", "gtreqqless;": "\u2a8c", "gtrless;": "\u2277", "gtrsim;": "\u2273", "gvertneqq;": "\u2269\ufe00", "gvnE;": "\u2269\ufe00", "hArr;": "\u21d4", "hairsp;": "\u200a", "half;": "\xbd", "hamilt;": "\u210b", "hardcy;": "\u044a", "harr;": "\u2194", "harrcir;": "\u2948", "harrw;": "\u21ad", "hbar;": "\u210f", "hcirc;": "\u0125", "hearts;": "\u2665", "heartsuit;": "\u2665", "hellip;": "\u2026", "hercon;": "\u22b9", "hfr;": "\U0001d525", "hksearow;": "\u2925", "hkswarow;": "\u2926", "hoarr;": "\u21ff", "homtht;": "\u223b", "hookleftarrow;": "\u21a9", "hookrightarrow;": "\u21aa", "hopf;": "\U0001d559", "horbar;": "\u2015", "hscr;": "\U0001d4bd", "hslash;": "\u210f", "hstrok;": "\u0127", "hybull;": "\u2043", "hyphen;": "\u2010", "iacute": "\xed", "iacute;": "\xed", "ic;": "\u2063", "icirc": "\xee", "icirc;": "\xee", "icy;": "\u0438", "iecy;": "\u0435", "iexcl": "\xa1", "iexcl;": "\xa1", "iff;": "\u21d4", "ifr;": "\U0001d526", "igrave": "\xec", "igrave;": "\xec", "ii;": "\u2148", "iiiint;": "\u2a0c", "iiint;": "\u222d", "iinfin;": 
"\u29dc", "iiota;": "\u2129", "ijlig;": "\u0133", "imacr;": "\u012b", "image;": "\u2111", "imagline;": "\u2110", "imagpart;": "\u2111", "imath;": "\u0131", "imof;": "\u22b7", "imped;": "\u01b5", "in;": "\u2208", "incare;": "\u2105", "infin;": "\u221e", "infintie;": "\u29dd", "inodot;": "\u0131", "int;": "\u222b", "intcal;": "\u22ba", "integers;": "\u2124", "intercal;": "\u22ba", "intlarhk;": "\u2a17", "intprod;": "\u2a3c", "iocy;": "\u0451", "iogon;": "\u012f", "iopf;": "\U0001d55a", "iota;": "\u03b9", "iprod;": "\u2a3c", "iquest": "\xbf", "iquest;": "\xbf", "iscr;": "\U0001d4be", "isin;": "\u2208", "isinE;": "\u22f9", "isindot;": "\u22f5", "isins;": "\u22f4", "isinsv;": "\u22f3", "isinv;": "\u2208", "it;": "\u2062", "itilde;": "\u0129", "iukcy;": "\u0456", "iuml": "\xef", "iuml;": "\xef", "jcirc;": "\u0135", "jcy;": "\u0439", "jfr;": "\U0001d527", "jmath;": "\u0237", "jopf;": "\U0001d55b", "jscr;": "\U0001d4bf", "jsercy;": "\u0458", "jukcy;": "\u0454", "kappa;": "\u03ba", "kappav;": "\u03f0", "kcedil;": "\u0137", "kcy;": "\u043a", "kfr;": "\U0001d528", "kgreen;": "\u0138", "khcy;": "\u0445", "kjcy;": "\u045c", "kopf;": "\U0001d55c", "kscr;": "\U0001d4c0", "lAarr;": "\u21da", "lArr;": "\u21d0", "lAtail;": "\u291b", "lBarr;": "\u290e", "lE;": "\u2266", "lEg;": "\u2a8b", "lHar;": "\u2962", "lacute;": "\u013a", "laemptyv;": "\u29b4", "lagran;": "\u2112", "lambda;": "\u03bb", "lang;": "\u27e8", "langd;": "\u2991", "langle;": "\u27e8", "lap;": "\u2a85", "laquo": "\xab", "laquo;": "\xab", "larr;": "\u2190", "larrb;": "\u21e4", "larrbfs;": "\u291f", "larrfs;": "\u291d", "larrhk;": "\u21a9", "larrlp;": "\u21ab", "larrpl;": "\u2939", "larrsim;": "\u2973", "larrtl;": "\u21a2", "lat;": "\u2aab", "latail;": "\u2919", "late;": "\u2aad", "lates;": "\u2aad\ufe00", "lbarr;": "\u290c", "lbbrk;": "\u2772", "lbrace;": "{", "lbrack;": "[", "lbrke;": "\u298b", "lbrksld;": "\u298f", "lbrkslu;": "\u298d", "lcaron;": "\u013e", "lcedil;": "\u013c", "lceil;": "\u2308", "lcub;": "{", "lcy;": 
"\u043b", "ldca;": "\u2936", "ldquo;": "\u201c", "ldquor;": "\u201e", "ldrdhar;": "\u2967", "ldrushar;": "\u294b", "ldsh;": "\u21b2", "le;": "\u2264", "leftarrow;": "\u2190", "leftarrowtail;": "\u21a2", "leftharpoondown;": "\u21bd", "leftharpoonup;": "\u21bc", "leftleftarrows;": "\u21c7", "leftrightarrow;": "\u2194", "leftrightarrows;": "\u21c6", "leftrightharpoons;": "\u21cb", "leftrightsquigarrow;": "\u21ad", "leftthreetimes;": "\u22cb", "leg;": "\u22da", "leq;": "\u2264", "leqq;": "\u2266", "leqslant;": "\u2a7d", "les;": "\u2a7d", "lescc;": "\u2aa8", "lesdot;": "\u2a7f", "lesdoto;": "\u2a81", "lesdotor;": "\u2a83", "lesg;": "\u22da\ufe00", "lesges;": "\u2a93", "lessapprox;": "\u2a85", "lessdot;": "\u22d6", "lesseqgtr;": "\u22da", "lesseqqgtr;": "\u2a8b", "lessgtr;": "\u2276", "lesssim;": "\u2272", "lfisht;": "\u297c", "lfloor;": "\u230a", "lfr;": "\U0001d529", "lg;": "\u2276", "lgE;": "\u2a91", "lhard;": "\u21bd", "lharu;": "\u21bc", "lharul;": "\u296a", "lhblk;": "\u2584", "ljcy;": "\u0459", "ll;": "\u226a", "llarr;": "\u21c7", "llcorner;": "\u231e", "llhard;": "\u296b", "lltri;": "\u25fa", "lmidot;": "\u0140", "lmoust;": "\u23b0", "lmoustache;": "\u23b0", "lnE;": "\u2268", "lnap;": "\u2a89", "lnapprox;": "\u2a89", "lne;": "\u2a87", "lneq;": "\u2a87", "lneqq;": "\u2268", "lnsim;": "\u22e6", "loang;": "\u27ec", "loarr;": "\u21fd", "lobrk;": "\u27e6", "longleftarrow;": "\u27f5", "longleftrightarrow;": "\u27f7", "longmapsto;": "\u27fc", "longrightarrow;": "\u27f6", "looparrowleft;": "\u21ab", "looparrowright;": "\u21ac", "lopar;": "\u2985", "lopf;": "\U0001d55d", "loplus;": "\u2a2d", "lotimes;": "\u2a34", "lowast;": "\u2217", "lowbar;": "_", "loz;": "\u25ca", "lozenge;": "\u25ca", "lozf;": "\u29eb", "lpar;": "(", "lparlt;": "\u2993", "lrarr;": "\u21c6", "lrcorner;": "\u231f", "lrhar;": "\u21cb", "lrhard;": "\u296d", "lrm;": "\u200e", "lrtri;": "\u22bf", "lsaquo;": "\u2039", "lscr;": "\U0001d4c1", "lsh;": "\u21b0", "lsim;": "\u2272", "lsime;": "\u2a8d", "lsimg;": 
"\u2a8f", "lsqb;": "[", "lsquo;": "\u2018", "lsquor;": "\u201a", "lstrok;": "\u0142", "lt": "<", "lt;": "<", "ltcc;": "\u2aa6", "ltcir;": "\u2a79", "ltdot;": "\u22d6", "lthree;": "\u22cb", "ltimes;": "\u22c9", "ltlarr;": "\u2976", "ltquest;": "\u2a7b", "ltrPar;": "\u2996", "ltri;": "\u25c3", "ltrie;": "\u22b4", "ltrif;": "\u25c2", "lurdshar;": "\u294a", "luruhar;": "\u2966", "lvertneqq;": "\u2268\ufe00", "lvnE;": "\u2268\ufe00", "mDDot;": "\u223a", "macr": "\xaf", "macr;": "\xaf", "male;": "\u2642", "malt;": "\u2720", "maltese;": "\u2720", "map;": "\u21a6", "mapsto;": "\u21a6", "mapstodown;": "\u21a7", "mapstoleft;": "\u21a4", "mapstoup;": "\u21a5", "marker;": "\u25ae", "mcomma;": "\u2a29", "mcy;": "\u043c", "mdash;": "\u2014", "measuredangle;": "\u2221", "mfr;": "\U0001d52a", "mho;": "\u2127", "micro": "\xb5", "micro;": "\xb5", "mid;": "\u2223", "midast;": "*", "midcir;": "\u2af0", "middot": "\xb7", "middot;": "\xb7", "minus;": "\u2212", "minusb;": "\u229f", "minusd;": "\u2238", "minusdu;": "\u2a2a", "mlcp;": "\u2adb", "mldr;": "\u2026", "mnplus;": "\u2213", "models;": "\u22a7", "mopf;": "\U0001d55e", "mp;": "\u2213", "mscr;": "\U0001d4c2", "mstpos;": "\u223e", "mu;": "\u03bc", "multimap;": "\u22b8", "mumap;": "\u22b8", "nGg;": "\u22d9\u0338", "nGt;": "\u226b\u20d2", "nGtv;": "\u226b\u0338", "nLeftarrow;": "\u21cd", "nLeftrightarrow;": "\u21ce", "nLl;": "\u22d8\u0338", "nLt;": "\u226a\u20d2", "nLtv;": "\u226a\u0338", "nRightarrow;": "\u21cf", "nVDash;": "\u22af", "nVdash;": "\u22ae", "nabla;": "\u2207", "nacute;": "\u0144", "nang;": "\u2220\u20d2", "nap;": "\u2249", "napE;": "\u2a70\u0338", "napid;": "\u224b\u0338", "napos;": "\u0149", "napprox;": "\u2249", "natur;": "\u266e", "natural;": "\u266e", "naturals;": "\u2115", "nbsp": "\xa0", "nbsp;": "\xa0", "nbump;": "\u224e\u0338", "nbumpe;": "\u224f\u0338", "ncap;": "\u2a43", "ncaron;": "\u0148", "ncedil;": "\u0146", "ncong;": "\u2247", "ncongdot;": "\u2a6d\u0338", "ncup;": "\u2a42", "ncy;": "\u043d", "ndash;": 
"\u2013", "ne;": "\u2260", "neArr;": "\u21d7", "nearhk;": "\u2924", "nearr;": "\u2197", "nearrow;": "\u2197", "nedot;": "\u2250\u0338", "nequiv;": "\u2262", "nesear;": "\u2928", "nesim;": "\u2242\u0338", "nexist;": "\u2204", "nexists;": "\u2204", "nfr;": "\U0001d52b", "ngE;": "\u2267\u0338", "nge;": "\u2271", "ngeq;": "\u2271", "ngeqq;": "\u2267\u0338", "ngeqslant;": "\u2a7e\u0338", "nges;": "\u2a7e\u0338", "ngsim;": "\u2275", "ngt;": "\u226f", "ngtr;": "\u226f", "nhArr;": "\u21ce", "nharr;": "\u21ae", "nhpar;": "\u2af2", "ni;": "\u220b", "nis;": "\u22fc", "nisd;": "\u22fa", "niv;": "\u220b", "njcy;": "\u045a", "nlArr;": "\u21cd", "nlE;": "\u2266\u0338", "nlarr;": "\u219a", "nldr;": "\u2025", "nle;": "\u2270", "nleftarrow;": "\u219a", "nleftrightarrow;": "\u21ae", "nleq;": "\u2270", "nleqq;": "\u2266\u0338", "nleqslant;": "\u2a7d\u0338", "nles;": "\u2a7d\u0338", "nless;": "\u226e", "nlsim;": "\u2274", "nlt;": "\u226e", "nltri;": "\u22ea", "nltrie;": "\u22ec", "nmid;": "\u2224", "nopf;": "\U0001d55f", "not": "\xac", "not;": "\xac", "notin;": "\u2209", "notinE;": "\u22f9\u0338", "notindot;": "\u22f5\u0338", "notinva;": "\u2209", "notinvb;": "\u22f7", "notinvc;": "\u22f6", "notni;": "\u220c", "notniva;": "\u220c", "notnivb;": "\u22fe", "notnivc;": "\u22fd", "npar;": "\u2226", "nparallel;": "\u2226", "nparsl;": "\u2afd\u20e5", "npart;": "\u2202\u0338", "npolint;": "\u2a14", "npr;": "\u2280", "nprcue;": "\u22e0", "npre;": "\u2aaf\u0338", "nprec;": "\u2280", "npreceq;": "\u2aaf\u0338", "nrArr;": "\u21cf", "nrarr;": "\u219b", "nrarrc;": "\u2933\u0338", "nrarrw;": "\u219d\u0338", "nrightarrow;": "\u219b", "nrtri;": "\u22eb", "nrtrie;": "\u22ed", "nsc;": "\u2281", "nsccue;": "\u22e1", "nsce;": "\u2ab0\u0338", "nscr;": "\U0001d4c3", "nshortmid;": "\u2224", "nshortparallel;": "\u2226", "nsim;": "\u2241", "nsime;": "\u2244", "nsimeq;": "\u2244", "nsmid;": "\u2224", "nspar;": "\u2226", "nsqsube;": "\u22e2", "nsqsupe;": "\u22e3", "nsub;": "\u2284", "nsubE;": "\u2ac5\u0338", 
"nsube;": "\u2288", "nsubset;": "\u2282\u20d2", "nsubseteq;": "\u2288", "nsubseteqq;": "\u2ac5\u0338", "nsucc;": "\u2281", "nsucceq;": "\u2ab0\u0338", "nsup;": "\u2285", "nsupE;": "\u2ac6\u0338", "nsupe;": "\u2289", "nsupset;": "\u2283\u20d2", "nsupseteq;": "\u2289", "nsupseteqq;": "\u2ac6\u0338", "ntgl;": "\u2279", "ntilde": "\xf1", "ntilde;": "\xf1", "ntlg;": "\u2278", "ntriangleleft;": "\u22ea", "ntrianglelefteq;": "\u22ec", "ntriangleright;": "\u22eb", "ntrianglerighteq;": "\u22ed", "nu;": "\u03bd", "num;": "#", "numero;": "\u2116", "numsp;": "\u2007", "nvDash;": "\u22ad", "nvHarr;": "\u2904", "nvap;": "\u224d\u20d2", "nvdash;": "\u22ac", "nvge;": "\u2265\u20d2", "nvgt;": ">\u20d2", "nvinfin;": "\u29de", "nvlArr;": "\u2902", "nvle;": "\u2264\u20d2", "nvlt;": "<\u20d2", "nvltrie;": "\u22b4\u20d2", "nvrArr;": "\u2903", "nvrtrie;": "\u22b5\u20d2", "nvsim;": "\u223c\u20d2", "nwArr;": "\u21d6", "nwarhk;": "\u2923", "nwarr;": "\u2196", "nwarrow;": "\u2196", "nwnear;": "\u2927", "oS;": "\u24c8", "oacute": "\xf3", "oacute;": "\xf3", "oast;": "\u229b", "ocir;": "\u229a", "ocirc": "\xf4", "ocirc;": "\xf4", "ocy;": "\u043e", "odash;": "\u229d", "odblac;": "\u0151", "odiv;": "\u2a38", "odot;": "\u2299", "odsold;": "\u29bc", "oelig;": "\u0153", "ofcir;": "\u29bf", "ofr;": "\U0001d52c", "ogon;": "\u02db", "ograve": "\xf2", "ograve;": "\xf2", "ogt;": "\u29c1", "ohbar;": "\u29b5", "ohm;": "\u03a9", "oint;": "\u222e", "olarr;": "\u21ba", "olcir;": "\u29be", "olcross;": "\u29bb", "oline;": "\u203e", "olt;": "\u29c0", "omacr;": "\u014d", "omega;": "\u03c9", "omicron;": "\u03bf", "omid;": "\u29b6", "ominus;": "\u2296", "oopf;": "\U0001d560", "opar;": "\u29b7", "operp;": "\u29b9", "oplus;": "\u2295", "or;": "\u2228", "orarr;": "\u21bb", "ord;": "\u2a5d", "order;": "\u2134", "orderof;": "\u2134", "ordf": "\xaa", "ordf;": "\xaa", "ordm": "\xba", "ordm;": "\xba", "origof;": "\u22b6", "oror;": "\u2a56", "orslope;": "\u2a57", "orv;": "\u2a5b", "oscr;": "\u2134", "oslash": "\xf8", 
"oslash;": "\xf8", "osol;": "\u2298", "otilde": "\xf5", "otilde;": "\xf5", "otimes;": "\u2297", "otimesas;": "\u2a36", "ouml": "\xf6", "ouml;": "\xf6", "ovbar;": "\u233d", "par;": "\u2225", "para": "\xb6", "para;": "\xb6", "parallel;": "\u2225", "parsim;": "\u2af3", "parsl;": "\u2afd", "part;": "\u2202", "pcy;": "\u043f", "percnt;": "%", "period;": ".", "permil;": "\u2030", "perp;": "\u22a5", "pertenk;": "\u2031", "pfr;": "\U0001d52d", "phi;": "\u03c6", "phiv;": "\u03d5", "phmmat;": "\u2133", "phone;": "\u260e", "pi;": "\u03c0", "pitchfork;": "\u22d4", "piv;": "\u03d6", "planck;": "\u210f", "planckh;": "\u210e", "plankv;": "\u210f", "plus;": "+", "plusacir;": "\u2a23", "plusb;": "\u229e", "pluscir;": "\u2a22", "plusdo;": "\u2214", "plusdu;": "\u2a25", "pluse;": "\u2a72", "plusmn": "\xb1", "plusmn;": "\xb1", "plussim;": "\u2a26", "plustwo;": "\u2a27", "pm;": "\xb1", "pointint;": "\u2a15", "popf;": "\U0001d561", "pound": "\xa3", "pound;": "\xa3", "pr;": "\u227a", "prE;": "\u2ab3", "prap;": "\u2ab7", "prcue;": "\u227c", "pre;": "\u2aaf", "prec;": "\u227a", "precapprox;": "\u2ab7", "preccurlyeq;": "\u227c", "preceq;": "\u2aaf", "precnapprox;": "\u2ab9", "precneqq;": "\u2ab5", "precnsim;": "\u22e8", "precsim;": "\u227e", "prime;": "\u2032", "primes;": "\u2119", "prnE;": "\u2ab5", "prnap;": "\u2ab9", "prnsim;": "\u22e8", "prod;": "\u220f", "profalar;": "\u232e", "profline;": "\u2312", "profsurf;": "\u2313", "prop;": "\u221d", "propto;": "\u221d", "prsim;": "\u227e", "prurel;": "\u22b0", "pscr;": "\U0001d4c5", "psi;": "\u03c8", "puncsp;": "\u2008", "qfr;": "\U0001d52e", "qint;": "\u2a0c", "qopf;": "\U0001d562", "qprime;": "\u2057", "qscr;": "\U0001d4c6", "quaternions;": "\u210d", "quatint;": "\u2a16", "quest;": "?", "questeq;": "\u225f", "quot": "\"", "quot;": "\"", "rAarr;": "\u21db", "rArr;": "\u21d2", "rAtail;": "\u291c", "rBarr;": "\u290f", "rHar;": "\u2964", "race;": "\u223d\u0331", "racute;": "\u0155", "radic;": "\u221a", "raemptyv;": "\u29b3", "rang;": "\u27e9", 
"rangd;": "\u2992", "range;": "\u29a5", "rangle;": "\u27e9", "raquo": "\xbb", "raquo;": "\xbb", "rarr;": "\u2192", "rarrap;": "\u2975", "rarrb;": "\u21e5", "rarrbfs;": "\u2920", "rarrc;": "\u2933", "rarrfs;": "\u291e", "rarrhk;": "\u21aa", "rarrlp;": "\u21ac", "rarrpl;": "\u2945", "rarrsim;": "\u2974", "rarrtl;": "\u21a3", "rarrw;": "\u219d", "ratail;": "\u291a", "ratio;": "\u2236", "rationals;": "\u211a", "rbarr;": "\u290d", "rbbrk;": "\u2773", "rbrace;": "}", "rbrack;": "]", "rbrke;": "\u298c", "rbrksld;": "\u298e", "rbrkslu;": "\u2990", "rcaron;": "\u0159", "rcedil;": "\u0157", "rceil;": "\u2309", "rcub;": "}", "rcy;": "\u0440", "rdca;": "\u2937", "rdldhar;": "\u2969", "rdquo;": "\u201d", "rdquor;": "\u201d", "rdsh;": "\u21b3", "real;": "\u211c", "realine;": "\u211b", "realpart;": "\u211c", "reals;": "\u211d", "rect;": "\u25ad", "reg": "\xae", "reg;": "\xae", "rfisht;": "\u297d", "rfloor;": "\u230b", "rfr;": "\U0001d52f", "rhard;": "\u21c1", "rharu;": "\u21c0", "rharul;": "\u296c", "rho;": "\u03c1", "rhov;": "\u03f1", "rightarrow;": "\u2192", "rightarrowtail;": "\u21a3", "rightharpoondown;": "\u21c1", "rightharpoonup;": "\u21c0", "rightleftarrows;": "\u21c4", "rightleftharpoons;": "\u21cc", "rightrightarrows;": "\u21c9", "rightsquigarrow;": "\u219d", "rightthreetimes;": "\u22cc", "ring;": "\u02da", "risingdotseq;": "\u2253", "rlarr;": "\u21c4", "rlhar;": "\u21cc", "rlm;": "\u200f", "rmoust;": "\u23b1", "rmoustache;": "\u23b1", "rnmid;": "\u2aee", "roang;": "\u27ed", "roarr;": "\u21fe", "robrk;": "\u27e7", "ropar;": "\u2986", "ropf;": "\U0001d563", "roplus;": "\u2a2e", "rotimes;": "\u2a35", "rpar;": ")", "rpargt;": "\u2994", "rppolint;": "\u2a12", "rrarr;": "\u21c9", "rsaquo;": "\u203a", "rscr;": "\U0001d4c7", "rsh;": "\u21b1", "rsqb;": "]", "rsquo;": "\u2019", "rsquor;": "\u2019", "rthree;": "\u22cc", "rtimes;": "\u22ca", "rtri;": "\u25b9", "rtrie;": "\u22b5", "rtrif;": "\u25b8", "rtriltri;": "\u29ce", "ruluhar;": "\u2968", "rx;": "\u211e", "sacute;": "\u015b", 
"sbquo;": "\u201a", "sc;": "\u227b", "scE;": "\u2ab4", "scap;": "\u2ab8", "scaron;": "\u0161", "sccue;": "\u227d", "sce;": "\u2ab0", "scedil;": "\u015f", "scirc;": "\u015d", "scnE;": "\u2ab6", "scnap;": "\u2aba", "scnsim;": "\u22e9", "scpolint;": "\u2a13", "scsim;": "\u227f", "scy;": "\u0441", "sdot;": "\u22c5", "sdotb;": "\u22a1", "sdote;": "\u2a66", "seArr;": "\u21d8", "searhk;": "\u2925", "searr;": "\u2198", "searrow;": "\u2198", "sect": "\xa7", "sect;": "\xa7", "semi;": ";", "seswar;": "\u2929", "setminus;": "\u2216", "setmn;": "\u2216", "sext;": "\u2736", "sfr;": "\U0001d530", "sfrown;": "\u2322", "sharp;": "\u266f", "shchcy;": "\u0449", "shcy;": "\u0448", "shortmid;": "\u2223", "shortparallel;": "\u2225", "shy": "\xad", "shy;": "\xad", "sigma;": "\u03c3", "sigmaf;": "\u03c2", "sigmav;": "\u03c2", "sim;": "\u223c", "simdot;": "\u2a6a", "sime;": "\u2243", "simeq;": "\u2243", "simg;": "\u2a9e", "simgE;": "\u2aa0", "siml;": "\u2a9d", "simlE;": "\u2a9f", "simne;": "\u2246", "simplus;": "\u2a24", "simrarr;": "\u2972", "slarr;": "\u2190", "smallsetminus;": "\u2216", "smashp;": "\u2a33", "smeparsl;": "\u29e4", "smid;": "\u2223", "smile;": "\u2323", "smt;": "\u2aaa", "smte;": "\u2aac", "smtes;": "\u2aac\ufe00", "softcy;": "\u044c", "sol;": "/", "solb;": "\u29c4", "solbar;": "\u233f", "sopf;": "\U0001d564", "spades;": "\u2660", "spadesuit;": "\u2660", "spar;": "\u2225", "sqcap;": "\u2293", "sqcaps;": "\u2293\ufe00", "sqcup;": "\u2294", "sqcups;": "\u2294\ufe00", "sqsub;": "\u228f", "sqsube;": "\u2291", "sqsubset;": "\u228f", "sqsubseteq;": "\u2291", "sqsup;": "\u2290", "sqsupe;": "\u2292", "sqsupset;": "\u2290", "sqsupseteq;": "\u2292", "squ;": "\u25a1", "square;": "\u25a1", "squarf;": "\u25aa", "squf;": "\u25aa", "srarr;": "\u2192", "sscr;": "\U0001d4c8", "ssetmn;": "\u2216", "ssmile;": "\u2323", "sstarf;": "\u22c6", "star;": "\u2606", "starf;": "\u2605", "straightepsilon;": "\u03f5", "straightphi;": "\u03d5", "strns;": "\xaf", "sub;": "\u2282", "subE;": "\u2ac5", 
"subdot;": "\u2abd", "sube;": "\u2286", "subedot;": "\u2ac3", "submult;": "\u2ac1", "subnE;": "\u2acb", "subne;": "\u228a", "subplus;": "\u2abf", "subrarr;": "\u2979", "subset;": "\u2282", "subseteq;": "\u2286", "subseteqq;": "\u2ac5", "subsetneq;": "\u228a", "subsetneqq;": "\u2acb", "subsim;": "\u2ac7", "subsub;": "\u2ad5", "subsup;": "\u2ad3", "succ;": "\u227b", "succapprox;": "\u2ab8", "succcurlyeq;": "\u227d", "succeq;": "\u2ab0", "succnapprox;": "\u2aba", "succneqq;": "\u2ab6", "succnsim;": "\u22e9", "succsim;": "\u227f", "sum;": "\u2211", "sung;": "\u266a", "sup1": "\xb9", "sup1;": "\xb9", "sup2": "\xb2", "sup2;": "\xb2", "sup3": "\xb3", "sup3;": "\xb3", "sup;": "\u2283", "supE;": "\u2ac6", "supdot;": "\u2abe", "supdsub;": "\u2ad8", "supe;": "\u2287", "supedot;": "\u2ac4", "suphsol;": "\u27c9", "suphsub;": "\u2ad7", "suplarr;": "\u297b", "supmult;": "\u2ac2", "supnE;": "\u2acc", "supne;": "\u228b", "supplus;": "\u2ac0", "supset;": "\u2283", "supseteq;": "\u2287", "supseteqq;": "\u2ac6", "supsetneq;": "\u228b", "supsetneqq;": "\u2acc", "supsim;": "\u2ac8", "supsub;": "\u2ad4", "supsup;": "\u2ad6", "swArr;": "\u21d9", "swarhk;": "\u2926", "swarr;": "\u2199", "swarrow;": "\u2199", "swnwar;": "\u292a", "szlig": "\xdf", "szlig;": "\xdf", "target;": "\u2316", "tau;": "\u03c4", "tbrk;": "\u23b4", "tcaron;": "\u0165", "tcedil;": "\u0163", "tcy;": "\u0442", "tdot;": "\u20db", "telrec;": "\u2315", "tfr;": "\U0001d531", "there4;": "\u2234", "therefore;": "\u2234", "theta;": "\u03b8", "thetasym;": "\u03d1", "thetav;": "\u03d1", "thickapprox;": "\u2248", "thicksim;": "\u223c", "thinsp;": "\u2009", "thkap;": "\u2248", "thksim;": "\u223c", "thorn": "\xfe", "thorn;": "\xfe", "tilde;": "\u02dc", "times": "\xd7", "times;": "\xd7", "timesb;": "\u22a0", "timesbar;": "\u2a31", "timesd;": "\u2a30", "tint;": "\u222d", "toea;": "\u2928", "top;": "\u22a4", "topbot;": "\u2336", "topcir;": "\u2af1", "topf;": "\U0001d565", "topfork;": "\u2ada", "tosa;": "\u2929", "tprime;": "\u2034", 
"trade;": "\u2122", "triangle;": "\u25b5", "triangledown;": "\u25bf", "triangleleft;": "\u25c3", "trianglelefteq;": "\u22b4", "triangleq;": "\u225c", "triangleright;": "\u25b9", "trianglerighteq;": "\u22b5", "tridot;": "\u25ec", "trie;": "\u225c", "triminus;": "\u2a3a", "triplus;": "\u2a39", "trisb;": "\u29cd", "tritime;": "\u2a3b", "trpezium;": "\u23e2", "tscr;": "\U0001d4c9", "tscy;": "\u0446", "tshcy;": "\u045b", "tstrok;": "\u0167", "twixt;": "\u226c", "twoheadleftarrow;": "\u219e", "twoheadrightarrow;": "\u21a0", "uArr;": "\u21d1", "uHar;": "\u2963", "uacute": "\xfa", "uacute;": "\xfa", "uarr;": "\u2191", "ubrcy;": "\u045e", "ubreve;": "\u016d", "ucirc": "\xfb", "ucirc;": "\xfb", "ucy;": "\u0443", "udarr;": "\u21c5", "udblac;": "\u0171", "udhar;": "\u296e", "ufisht;": "\u297e", "ufr;": "\U0001d532", "ugrave": "\xf9", "ugrave;": "\xf9", "uharl;": "\u21bf", "uharr;": "\u21be", "uhblk;": "\u2580", "ulcorn;": "\u231c", "ulcorner;": "\u231c", "ulcrop;": "\u230f", "ultri;": "\u25f8", "umacr;": "\u016b", "uml": "\xa8", "uml;": "\xa8", "uogon;": "\u0173", "uopf;": "\U0001d566", "uparrow;": "\u2191", "updownarrow;": "\u2195", "upharpoonleft;": "\u21bf", "upharpoonright;": "\u21be", "uplus;": "\u228e", "upsi;": "\u03c5", "upsih;": "\u03d2", "upsilon;": "\u03c5", "upuparrows;": "\u21c8", "urcorn;": "\u231d", "urcorner;": "\u231d", "urcrop;": "\u230e", "uring;": "\u016f", "urtri;": "\u25f9", "uscr;": "\U0001d4ca", "utdot;": "\u22f0", "utilde;": "\u0169", "utri;": "\u25b5", "utrif;": "\u25b4", "uuarr;": "\u21c8", "uuml": "\xfc", "uuml;": "\xfc", "uwangle;": "\u29a7", "vArr;": "\u21d5", "vBar;": "\u2ae8", "vBarv;": "\u2ae9", "vDash;": "\u22a8", "vangrt;": "\u299c", "varepsilon;": "\u03f5", "varkappa;": "\u03f0", "varnothing;": "\u2205", "varphi;": "\u03d5", "varpi;": "\u03d6", "varpropto;": "\u221d", "varr;": "\u2195", "varrho;": "\u03f1", "varsigma;": "\u03c2", "varsubsetneq;": "\u228a\ufe00", "varsubsetneqq;": "\u2acb\ufe00", "varsupsetneq;": "\u228b\ufe00", 
"varsupsetneqq;": "\u2acc\ufe00", "vartheta;": "\u03d1", "vartriangleleft;": "\u22b2", "vartriangleright;": "\u22b3", "vcy;": "\u0432", "vdash;": "\u22a2", "vee;": "\u2228", "veebar;": "\u22bb", "veeeq;": "\u225a", "vellip;": "\u22ee", "verbar;": "|", "vert;": "|", "vfr;": "\U0001d533", "vltri;": "\u22b2", "vnsub;": "\u2282\u20d2", "vnsup;": "\u2283\u20d2", "vopf;": "\U0001d567", "vprop;": "\u221d", "vrtri;": "\u22b3", "vscr;": "\U0001d4cb", "vsubnE;": "\u2acb\ufe00", "vsubne;": "\u228a\ufe00", "vsupnE;": "\u2acc\ufe00", "vsupne;": "\u228b\ufe00", "vzigzag;": "\u299a", "wcirc;": "\u0175", "wedbar;": "\u2a5f", "wedge;": "\u2227", "wedgeq;": "\u2259", "weierp;": "\u2118", "wfr;": "\U0001d534", "wopf;": "\U0001d568", "wp;": "\u2118", "wr;": "\u2240", "wreath;": "\u2240", "wscr;": "\U0001d4cc", "xcap;": "\u22c2", "xcirc;": "\u25ef", "xcup;": "\u22c3", "xdtri;": "\u25bd", "xfr;": "\U0001d535", "xhArr;": "\u27fa", "xharr;": "\u27f7", "xi;": "\u03be", "xlArr;": "\u27f8", "xlarr;": "\u27f5", "xmap;": "\u27fc", "xnis;": "\u22fb", "xodot;": "\u2a00", "xopf;": "\U0001d569", "xoplus;": "\u2a01", "xotime;": "\u2a02", "xrArr;": "\u27f9", "xrarr;": "\u27f6", "xscr;": "\U0001d4cd", "xsqcup;": "\u2a06", "xuplus;": "\u2a04", "xutri;": "\u25b3", "xvee;": "\u22c1", "xwedge;": "\u22c0", "yacute": "\xfd", "yacute;": "\xfd", "yacy;": "\u044f", "ycirc;": "\u0177", "ycy;": "\u044b", "yen": "\xa5", "yen;": "\xa5", "yfr;": "\U0001d536", "yicy;": "\u0457", "yopf;": "\U0001d56a", "yscr;": "\U0001d4ce", "yucy;": "\u044e", "yuml": "\xff", "yuml;": "\xff", "zacute;": "\u017a", "zcaron;": "\u017e", "zcy;": "\u0437", "zdot;": "\u017c", "zeetrf;": "\u2128", "zeta;": "\u03b6", "zfr;": "\U0001d537", "zhcy;": "\u0436", "zigrarr;": "\u21dd", "zopf;": "\U0001d56b", "zscr;": "\U0001d4cf", "zwj;": "\u200d", "zwnj;": "\u200c", } replacementCharacters = { 0x0: "\uFFFD", 0x0d: "\u000D", 0x80: "\u20AC", 0x81: "\u0081", 0x82: "\u201A", 0x83: "\u0192", 0x84: "\u201E", 0x85: "\u2026", 0x86: "\u2020", 0x87: 
"\u2021", 0x88: "\u02C6", 0x89: "\u2030", 0x8A: "\u0160", 0x8B: "\u2039", 0x8C: "\u0152", 0x8D: "\u008D", 0x8E: "\u017D", 0x8F: "\u008F", 0x90: "\u0090", 0x91: "\u2018", 0x92: "\u2019", 0x93: "\u201C", 0x94: "\u201D", 0x95: "\u2022", 0x96: "\u2013", 0x97: "\u2014", 0x98: "\u02DC", 0x99: "\u2122", 0x9A: "\u0161", 0x9B: "\u203A", 0x9C: "\u0153", 0x9D: "\u009D", 0x9E: "\u017E", 0x9F: "\u0178", } tokenTypes = { "Doctype": 0, "Characters": 1, "SpaceCharacters": 2, "StartTag": 3, "EndTag": 4, "EmptyTag": 5, "Comment": 6, "ParseError": 7 } tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"], tokenTypes["EmptyTag"]]) prefixes = dict([(v, k) for k, v in namespaces.items()]) prefixes["http://www.w3.org/1998/Math/MathML"] = "math" class DataLossWarning(UserWarning): pass class ReparseException(Exception): pass PK.e[u.--html5lib/serializer.pyonu[ abc@`sddlmZmZmZddlmZddlZddlmZm Z ddl m Z m Z m Z ddl mZmZmZddlmZmZdd lmZd je d Zejd ed Zejd edZiZeddkZxeejD]\Z Z!er<ee!dkse r[ee!dkr[qne!dkree!dkrej"e!Z!n e#e!Z!e!ekse j$re ee!`u[u]u_  /`  ᠎᠏           

   ]u􏿿iu&c C`st|ttfrqg}g}t}xt|j|j|j!D]\}}|rbt}qDn||j}tj |j|t |j|dg!rtj |j||d!}t }n t |}|j|qDWxz|D]r}tj|} | r<|jd|j| | jdsW|jdqWq|jdt|dqWdj||jfSt|SdS(Niu&u;u&#x%s;u(t isinstancetUnicodeEncodeErrortUnicodeTranslateErrortFalset enumeratetobjecttstarttendR tisSurrogatePairtmintsurrogatePairToCodepointtTruetordtappendt_encode_entity_maptgettendswiththextjoinR( texctrest codepointstskiptitctindext codepointtcpte((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pythtmlentityreplace_errors*s0) ,     uhtmlentityreplaceuetreecK`s1tj|}t|}|j|||S(N(R t getTreeWalkertHTMLSerializertrender(tinputttreetencodingtserializer_optstwalkerts((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyt serializeJs R.cB`seZdZdZeZeZeZeZ eZ eZ eZ eZ eZeZeZeZdZdZdZdZddZddZddZRS(ulegacyu"uquote_attr_valuesu quote_charuuse_best_quote_charuomit_optional_tagsuminimize_boolean_attributesuuse_trailing_solidususpace_before_trailing_solidusuescape_lt_in_attrsu escape_rcdatauresolve_entitiesualphabetical_attributesuinject_meta_charsetustrip_whitespaceusanitizec K`st|t|j}t|dkrJtdtt|nd|krbt|_nx6|jD]+}t|||j |t ||qlWg|_ t|_ dS(u6 Initialize HTMLSerializer. Keyword options (default given first unless specified) include: inject_meta_charset=True|False Whether it insert a meta element to define the character set of the document. quote_attr_values="legacy"|"spec"|"always" Whether to quote attribute values that don't require quoting per legacy browser behaviour, when required by the standard, or always. quote_char=u'"'|u"'" Use given quote character for attribute quoting. Default is to use double quote unless attribute value contains a double quote, in which case single quotes are used instead. escape_lt_in_attrs=False|True Whether to escape < in attribute values. escape_rcdata=False|True Whether to escape characters that need to be escaped within normal elements within rcdata elements such as style. 
resolve_entities=True|False Whether to resolve named character entities that appear in the source tree. The XML predefined entities < > & " ' are unaffected by this setting. strip_whitespace=False|True Whether to remove semantically meaningless whitespace. (This compresses all whitespace to a single space except within pre.) minimize_boolean_attributes=True|False Shortens boolean attributes to give just the attribute value, for example becomes . use_trailing_solidus=False|True Includes a close-tag slash at the end of the start tag of void elements (empty elements whose end tag is forbidden). E.g.
. space_before_trailing_solidus=True|False Places a space immediately before the closing slash in a tag using a trailing solidus. E.g.
. Requires use_trailing_solidus. sanitize=False|True Strip all unsafe or unknown constructs from output. See `html5lib user documentation`_ omit_optional_tags=True|False Omit start/end tags that are optional. alphabetical_attributes=False|True Reorder attributes to be in alphabetical order. .. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation iu2__init__() got an unexpected keyword argument '%s'u quote_charN( t frozensettoptionstlent TypeErrortnexttiterRtuse_best_quote_chartsetattrRtgetattrterrorststrict(tselftkwargstunexpected_argstattr((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyt__init__ps.  ) cC`s$|jr|j|jdS|SdS(Nuhtmlentityreplace(R2tencode(RBtstring((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyRGs cC`s$|jr|j|jdS|SdS(Nustrict(R2RG(RBRH((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyt encodeStricts cc`sn||_t}g|_|rI|jrIddlm}|||}n|jrqddlm}||}n|jrddl m}||}n|j rddl m}||}n|j rddl m}||}nx~|D]v}|d}|dkrd|d}|dr9|d|d7}n|d rP|d 7}n|d r|d jd d kr|d jd d kr|jdnd }nd }|d||d |f7}n|d7}|j|Vq|d3kra|dks|rF|r1|djdd kr1|jdn|j|dVqf|jt|dVq|d4kr.|d} |jd| V| tkr|j rt}n|r|jdnx|djD] \\} } } | } | }|jdV|j| V|j sI| tj| tkr| tjdtkr|jdV|jdksxt|d krt}nZ|jdkrtj|dk }n3|jdkrt j|dk }n t!d |j"d!d"}|j#r |j"d#d$}n|r|j$}|j%rhd |krDd |krDd }qhd |krhd |krhd }qhn|d kr|j"d d%}n|j"d d&}|j|V|j|V|j|Vq|j|VqqW| t&kr|j'r|j(r |jd'Vq|jd(Vn|jdVq|d)kr|d} | tkrYt}n|ro|jdn|jd*| Vq|d+kr|d}|jd,d kr|jd-n|jd.|dVq|d/krU|d} | d0}|t)kr|jd1| n|j*r:|t+kr:t)|}n d2| }|j|Vq|j|dqWdS(5Ni(tFilterutypeuDoctypeu u CharactersuSpaceCharactersudatauuCommentu--uComment contains --u uEntityu;uEntity %s not recognizedu&%s;(u CharactersuSpaceCharacters(uStartTaguEmptyTag(,R2RR@tinject_meta_charsettfilters.inject_meta_charsetRJtalphabetical_attributestfilters.alphabeticalattributeststrip_whitespacetfilters.whitespacetsanitizetfilters.sanitizertomit_optional_tagstfilters.optionaltagstfindtserializeErrorRIRGRR t 
escape_rcdataRtitemstminimize_boolean_attributesRRttupletquote_attr_valuesR9t_quoteAttributeSpectsearchtNonet_quoteAttributeLegacyt ValueErrortreplacetescape_lt_in_attrst quote_charR=Rtuse_trailing_solidustspace_before_trailing_solidusR tresolve_entitiesR (RBt treewalkerR2tin_cdataRJttokenttypetdoctypeRctnamet_t attr_namet attr_valuetktvt quote_attrtdatatkey((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyR6s                   # !                     cC`sE|r%djt|j||Sdjt|j|SdS(Ntu(R!tlistR6(RBRgR2((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyR/?suXXX ERROR MESSAGE NEEDEDcC`s&|jj||jr"tndS(N(R@RRAtSerializeError(RBRs((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyRVEs (uquote_attr_valuesu quote_charuuse_best_quote_charuomit_optional_tagsuminimize_boolean_attributesuuse_trailing_solidususpace_before_trailing_solidusuescape_lt_in_attrsu escape_rcdatauresolve_entitiesualphabetical_attributesuinject_meta_charsetustrip_whitespaceusanitizeN(t__name__t __module__R[RcRR=RSRYRRdReRbRWRfRMRKRORQR8RFRGRIR^R6R/RV(((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyR.Qs4 8   RwcB`seZdZRS(uError in serialized tree(RxRyt__doc__(((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyRwLs(,t __future__RRRtpip._vendor.sixRtretcodecsRRt constantsRRRR R R RuR R txml.sax.saxutilsRR!t_quoteAttributeSpecCharstcompileR\R_RR9t_is_ucs4RvRXRpRqRRtislowerR,R^R6RR.t ExceptionRw(((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyts8       PK.e[zhtml5lib/_utils.pyonu[ abc@`sGddlmZmZmZddlZddlmZddlmZyddl j j Z Wn#e k rddlj jZ nXddddd d d gZejdd koejd dkZy.edZeeesedZnWn eZnXeZdefdYZdZdZdZdZdS(i(tabsolute_importtdivisiontunicode_literalsN(t ModuleType(t text_typeu default_etreeuMethodDispatcheruisSurrogatePairusurrogatePairToCodepointumoduleFactoryFactoryusupports_lone_surrogatesuPY27iiiu"\uD800"u u"\uD800"tMethodDispatchercB`s#eZdZddZdZRS(upDict with 2 special properties: On 
initiation, keys that are lists, sets or tuples are converted to multiple keys so accessing any one of the items in the original list-like object returns the matching value md = MethodDispatcher({("foo", "bar"):"baz"}) md["foo"] == "baz" A default value which can be set through the default attribute. cC`sg}xi|D]a\}}t|ttttfr[x7|D]}|j||fq;Wq |j||fq Wtj||d|_ dS(N( t isinstancetlistttuplet frozensettsettappendtdictt__init__tNonetdefault(tselftitemst _dictEntriestnametvaluetitem((s?/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.pyR 4s cC`stj|||jS(N(R tgetR(Rtkey((s?/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.pyt __getitem__Cs((t__name__t __module__t__doc__R R(((s?/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.pyR's  cC`sht|dkogt|ddkogt|ddkogt|ddkogt|ddkS(Niiiiiii(tlentord(tdata((s?/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.pytisSurrogatePairJs,cC`s2dt|dddt|dd}|S(Niiiiii(R(Rtchar_val((s?/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.pytsurrogatePairToCodepointPsc`sifd}|S(Nc`sttjtdr(d|j}n d|j}t|j}y|||SWntk rt|}|||}|jj|dkri|s0    &   #  PK.e[GC/%%html5lib/html5parser.pyonu[ abc@`sddlmZmZmZddlmZmZmZddlZyddl m Z Wn!e k r{ddl m Z nXddl mZddl mZddl mZdd lmZdd l mZdd lmZmZmZmZmZmZmZmZmZmZmZm Z!m"Z"m#Z#m$Z$m%Z%d e&d Z'dd e&dZ(dZ)de*fdYZ+ej,dZ-dZ.de/e0dZ1de2fdYZ3dS(i(tabsolute_importtdivisiontunicode_literals(twith_metaclasstviewkeystPY3N(t OrderedDicti(t _inputstream(t _tokenizer(t treebuilders(tMarker(t_utils(tspaceCharacterstasciiUpper2LowertspecialElementstheadingElementst cdataElementstrcdataElementst tokenTypest tagTokenTypest namespacesthtmlIntegrationPointElementst"mathmlTextIntegrationPointElementstadjustForeignAttributestadjustMathMLAttributestadjustSVGAttributestEtReparseExceptionuetreecK`s1tj|}t|d|}|j||S(u.Parse a string or file-like object into a treetnamespaceHTMLElements(R tgetTreeBuildert HTMLParsertparse(tdoct 
treebuilderRtkwargsttbtp((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRsudivcK`s7tj|}t|d|}|j|d||S(NRt container(R RRt parseFragment(R R%R!RR"R#R$((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR&&sc`s dtffdY}|S(Nt Decoratedc`seZfdZRS(c`s^xE|jD]7\}}t|tjr:|}n|||tphasetinsertHtmlElementtresetInsertionModeR9t lastPhasetbeforeRCDataPhasetTruet framesetOK(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRI^s*           cC`s't|dsdS|jjjdjS(uThe name of the character encoding that was used to decode the input stream, or :obj:`None` if that is not determined yet. u tokenizeriN(thasattrR9RHRKt charEncodingRA(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytdocumentEncodingscC`se|jdkrK|jtdkrKd|jkoJ|jdjtdkS|j|jftkSdS(Nuannotation-xmlumathmluencodingu text/htmluapplication/xhtml+xml(u text/htmluapplication/xhtml+xml(RAt namespaceRt attributest translateR R(R?telement((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytisHTMLIntegrationPoints cC`s|j|jftkS(N(RaRAR(R?Rd((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytisMathMLTextIntegrationPointscC`s-td}td}td}td}td}td}td}x|jD]}d} |} x=| dk r| } |jjr|jjdnd} | r| jnd} | r| jnd} | d }||kr|j| d | jd id} qht |jjd ks| |jj ks|j | rx||krf|d t ddgks|||fks| t dkr| dkr||kr|d dks|j| r||||fkr|j}n |jd}||kr |j| } qh||kr)|j| } qh||krG|j| } qh||kre|j| } qh||kr|j| } qh||krh|j| } qhqhW||krS| drS| d rS|jdi| d d 6qSqSWt}g}x2|r(|j|j|jj}|rqqWdS(Nu CharactersuSpaceCharactersuStartTaguEndTaguCommentuDoctypeu ParseErroriutypeudataudatavarsiunameumglyphu malignmarkumathmluannotation-xmlusvguinForeignContentu selfClosinguselfClosingAcknowledgedu&non-void-element-with-trailing-solidus(RtnormalizedTokensR9R:t openElementsRaRAt parseErrortgettlentdefaultNamespaceRft frozensetRReRWR>tprocessCharacterstprocessSpaceCharacterstprocessStartTagt processEndTagtprocessCommenttprocessDoctypeR\tappendt 
processEOF(R?tCharactersTokentSpaceCharactersTokent StartTagTokent EndTagTokent CommentTokent DoctypeTokentParseErrorTokenttokent prev_tokent new_tokent currentNodetcurrentNodeNamespacetcurrentNodeNameR,RWt reprocessR>((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRJsp       "                  cc`s&x|jD]}|j|Vq WdS(N(RHtnormalizeToken(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRgscO`s&|j|td|||jjS(uParse a HTML document into a well-formed tree stream - a filelike object or string containing the HTML to be parsed The optional encoding parameter must be a string that indicates the encoding. If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) scripting - treat noscript elements as if javascript was turned on N(RMRNR9R:t getDocument(R?RKtargsR"((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cO`s#|j|t|||jjS(u2Parse a HTML fragment into a well-formed tree fragment container - name of the element we're setting the innerHTML property if set to None, default to 'div' stream - a filelike object or string containing the HTML to be parsed The optional encoding parameter must be a string that indicates the encoding. 
If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) scripting - treat noscript elements as if javascript was turned on (RMR\R:t getFragment(R?RKRR"((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR&suXXX-undefined-errorcC`s^|dkri}n|jj|jjj||f|jrZtt||ndS(N( R9R;RtRHRKtpositionR8t ParseErrorR(R?t errorcodetdatavars((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRis   % cC`sr|dtdkrn|d}t||dRW(R?tlasttnewModestnodetnodeNamet new_phase((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRY!sB       cC`sc|jj||dkr1|jj|j_n|jj|j_|j|_|jd|_dS(uYGeneric RCDATA/RAWTEXT Parsing algorithm contentType - RCDATA or RAWTEXT uRAWTEXTutextN( R:t insertElementRHRURTRSRWt originalPhaseR>(R?R}t contentType((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytparseRCDataRawtextMs   N(R5R6t__doc__R9RNR\RCRMRItpropertyR`ReRfRJRgRR&RiRRRRRRYR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR8s& "  C        ,c`sTd}d}dt|||fdYdfdY}dfdY}d ffd Y}d ffd Y}d ffdY}dffdY}dffdY} dffdY} dffdY} dffdY} dffdY} dffdY}dffdY}dffd Y}d!ffd"Y}d#ffd$Y}d%ffd&Y}d'ffd(Y}d)ffd*Y}d+ffd,Y}d-ffd.Y}d/ffd0Y}d1ffd2Y}i|d36|d46|d56|d66|d76|d86| d96| d:6| d;6| d<6| d=6|d>6|d?6|d@6|dA6|dB6|dC6|dD6|dE6|dF6|dG6|dH6|dI6S(JNc`s2tdtjDfd}|S(u4Logger that records which phase processes each tokencs`s!|]\}}||fVqdS(N((t.0tkeytvalue((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pys csc`sjjdrt|dkr|d}yi|dd6}Wn nX|dtkru|d|dRW(R?R}RAtpublicIdtsystemIdtcorrect((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRss              cS`s&d|j_|jjd|j_dS(Nuquirksu beforeHtml(RDRQR>RW(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyt anythingElses cS`s|jjd|j|S(Nuexpected-doctype-but-got-chars(RDRiR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRns 
cS`s,|jjdi|dd6|j|S(Nu"expected-doctype-but-got-start-taguname(RDRiR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRps  cS`s,|jjdi|dd6|j|S(Nu expected-doctype-but-got-end-taguname(RDRiR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRqs  cS`s|jjd|jtS(Nuexpected-doctype-but-got-eof(RDRiRR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu%s ( R5R6RoRrRsRRnRpRqRu(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs   _    tBeforeHtmlPhasecB`sGeZdZdZdZdZdZdZdZRS(cS`s3|jjtdd|jjd|j_dS(NuhtmluStartTagu beforeHead(R:t insertRoottimpliedTagTokenRDR>RW(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRX,scS`s|jtS(N(RXR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu1s cS`s|jj||jjdS(N(R:RR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRr5scS`sdS(N((R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRo8scS`s|j|S(N(RX(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn;s cS`s-|ddkrt|j_n|j|S(Nunameuhtml(R\RDRORX(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRp?s cS`sC|ddkr1|jjdi|dd6n|j|SdS(Nunameuheadubodyuhtmlubruunexpected-end-tag-before-html(uheadubodyuhtmlubr(RDRiRX(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRqEs   ( R5R6RXRuRrRoRnRpRq(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR*s      tBeforeHeadPhasec`s_eZfdZdZdZdZdZdZdZdZ dZ RS( c`s}j|||tjd|jfd|jfg|_|j|j_tjd|jfg|_ |j |j _dS(Nuhtmluheadubodyubr(uheadubodyuhtmlubr( RCR tMethodDispatcherRt startTagHeadRt startTagOthertdefaulttendTagImplyHeadRt endTagOther(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCNs 
cS`s|jtddtS(NuheaduStartTag(RRR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu\scS`sdS(N((R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRo`scS`s|jtdd|S(NuheaduStartTag(RR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRncscS`s|jjdj|S(NuinBody(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRgscS`s@|jj||jjd|j_|jjd|j_dS(NiuinHead(R:RRht headPointerRDR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRjscS`s|jtdd|S(NuheaduStartTag(RR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRoscS`s|jtdd|S(NuheaduStartTag(RR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRsscS`s"|jjdi|dd6dS(Nuend-tag-after-implied-rootuname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRws ( R5R6RCRuRoRnRRRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRMs       t InHeadPhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d Zd ZdZdZRS(c `sj|||tjd|jfd|jfd|jfd|jfd|jfd|jfd |j fd |j fg|_ |j |j _ tjd |jfd|jfg|_|j|j_ dS(Nuhtmlutitleunoframesustyleunoscriptuscriptubaseubasefontubgsounducommandulinkumetauheadubrubody(unoframesustyle(ubaseubasefontubgsounducommandulink(ubruhtmlubody(RCR RRt startTagTitletstartTagNoFramesStyletstartTagNoscripttstartTagScripttstartTagBaseLinkCommandt startTagMetaRRRRt endTagHeadtendTagHtmlBodyBrRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC|s         cS`s|jtS(N(RR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRus cS`s|j|S(N(R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRns 
cS`s|jjdj|S(NuinBody(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|jjddS(Nu!two-heads-are-not-better-than-one(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s.|jj||jjjt|dRW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`sT|jj||jjj|jj_|jj|j_|jjd|j_dS(Nutext( R:RRDRHtscriptDataStateRTRWRR>(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|j|S(N(R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`s/|jjjj}|jjd|j_dS(Nu afterHead(RDR:RhRR>RW(R?R}R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|j|S(N(R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`s"|jjdi|dd6dS(Nuunexpected-end-taguname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|jtddS(Nuhead(RR(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs(R5R6RCRuRnRRRRRRRRRRRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR{s               tInHeadNoscriptPhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d ZRS( c`sj|||tjd|jfd |jfd |jfg|_|j|j_tjd |j fd |j fg|_ |j |j _dS( Nuhtmlubasefontubgsoundulinkumetaunoframesustyleuheadunoscriptubr(ubasefontubgsoundulinkumetaunoframesustyle(uheadunoscript( RCR RRRtstartTagHeadNoscriptRRRtendTagNoscripttendTagBrRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCs   cS`s|jjd|jtS(Nueof-in-head-noscript(RDRiRR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRus cS`s|jjdj|S(NuinHead(RDR>Rr(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRrscS`s|jjd|j|S(Nuchar-in-head-noscript(RDRiR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRns 
cS`s|jjdj|S(NuinHead(RDR>Ro(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRoscS`s|jjdj|S(NuinBody(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|jjdj|S(NuinHead(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s"|jjdi|dd6dS(Nuunexpected-start-taguname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR scS`s,|jjdi|dd6|j|S(Nuunexpected-inhead-noscript-taguname(RDRiR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s cS`s/|jjjj}|jjd|j_dS(NuinHead(RDR:RhRR>RW(R?R}R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s,|jjdi|dd6|j|S(Nuunexpected-inhead-noscript-taguname(RDRiR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`s"|jjdi|dd6dS(Nuunexpected-end-taguname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|jtddS(Nunoscript(RR(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs(R5R6RCRuRrRnRoRRRRRRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs           tAfterHeadPhasec`szeZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z RS( c `sj|||tjd|jfd|jfd|jfd|jfd |jfg|_|j |j_ tjd|j fg|_ |j |j _ dS(Nuhtmlubodyuframesetubaseubasefontubgsoundulinkumetaunoframesuscriptustyleutitleuheadubr( ubaseubasefontubgsoundulinkumetaunoframesuscriptustyleutitle(ubodyuhtmlubr(RCR RRt startTagBodytstartTagFramesettstartTagFromHeadRRRRRRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC#s     cS`s|jtS(N(RR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu4s cS`s|j|S(N(R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn8s 
cS`s|jjdj|S(NuinBody(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR<scS`s6t|j_|jj||jjd|j_dS(NuinBody(RNRDR]R:RR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR?s cS`s*|jj||jjd|j_dS(Nu inFrameset(R:RRDR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRDscS`s|jjdi|dd6|jjj|jj|jjdj|xG|jjdddD],}|jdkrh|jjj |PqhqhWdS(Nu#unexpected-start-tag-out-of-my-headunameuinHeadiuhead( RDRiR:RhRtRR>RpRAtremove(R?R}R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRHs  cS`s"|jjdi|dd6dS(Nuunexpected-start-taguname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRRscS`s|j|S(N(R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRUs cS`s|j|S(N(R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRYs cS`s"|jjdi|dd6dS(Nuunexpected-end-taguname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR]scS`s?|jjtdd|jjd|j_t|j_dS(NubodyuStartTaguinBody(R:RRRDR>RWR\R](R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR`s(R5R6RCRuRnRRRRRRRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR"s         t InBodyPhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d Zd ZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZ dZ!d Z"d!Z#d"Z$d#Z%d$Z&d%Z'd&Z(d'Z)d(Z*d)Z+d*Z,d+Z-d,Z.d-Z/d.Z0d/Z1d0Z2d1Z3d2Z4RS(3c,`sij||||j|_tjd|jfdd|jfd |jfd |jfde|j ft |j fdf|j fd&|j fdg|jfd*|jfd+|jfdh|jfd8|jfd9|jfdi|jfd=|jfd>|jfdj|jfdk|jfdH|jfdI|jfdJ|jfdK|jfdL|jfdM|jfdN|jfdl|j fdQ|j!fdm|j"fdn|j#fdV|j$fdW|j%fdo|j&fg!|_'|j(|j'_)tjd |j*fd|j+fdp|j,fd&|j-fd |j.fdq|j/ft |j0fdr|j1fds|j2fd@|j3fg |_4|j5|j4_)dS(tNuhtmlubaseubasefontubgsounducommandulinkumetauscriptustyleutitleubodyuframesetuaddressuarticleuasideu blockquoteucenterudetailsudirudivudlufieldsetu figcaptionufigureufooteruheaderuhgroupumainumenuunavuolupusectionusummaryuulupreulistinguformuliuddudtu 
plaintextuaububigucodeuemufontuiususmallustrikeustronguttuuunobrubuttonuappletumarqueeuobjectuxmputableuareaubruembeduimgukeygenuwbruparamusourceutrackuinputuhruimageuisindexutextareauiframeunoscriptunoembedunoframesuselecturpurtuoptionuoptgroupumathusvgucaptionucolucolgroupuframeuheadutbodyutdutfootuthutheadutrudialog( ubaseubasefontubgsounducommandulinkumetauscriptustyleutitle(uaddressuarticleuasideu blockquoteucenterudetailsudirudivudlufieldsetu figcaptionufigureufooteruheaderuhgroupumainumenuunavuolupusectionusummaryuul(upreulisting(uliuddudt( ububigucodeuemufontuiususmallustrikeustronguttuu(uappletumarqueeuobject(uareaubruembeduimgukeygenuwbr(uparamusourceutrack(unoembedunoframes(urpurt(uoptionuoptgroup( ucaptionucolucolgroupuframeuheadutbodyutdutfootuthutheadutr(uaddressuarticleuasideu blockquoteubuttonucenterudetailsudialogudirudivudlufieldsetu figcaptionufigureufooteruheaderuhgroupulistingumainumenuunavuolupreusectionusummaryuul(uddudtuli(uaububigucodeuemufontuiunobrususmallustrikeustronguttuu(uappletumarqueeuobject(6RCtprocessSpaceCharactersNonPreRoR RRtstartTagProcessInHeadRRtstartTagClosePRtstartTagHeadingtstartTagPreListingt startTagFormtstartTagListItemtstartTagPlaintextt startTagAtstartTagFormattingt startTagNobrtstartTagButtontstartTagAppletMarqueeObjectt startTagXmpt startTagTabletstartTagVoidFormattingtstartTagParamSourcet startTagInputt startTagHrt startTagImagetstartTagIsIndextstartTagTextareatstartTagIFrameRtstartTagRawtexttstartTagSelectt startTagRpRtt startTagOptt startTagMatht startTagSvgtstartTagMisplacedRRRt endTagBodyt endTagHtmlt endTagBlockt endTagFormtendTagPtendTagListItemt endTagHeadingtendTagFormattingtendTagAppletMarqueeObjectRRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRChs~                                          
cS`s4|j|jko3|j|jko3|j|jkS(N(RARaRb(R?tnode1tnode2((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytisMatchingFormattingElementscS`s|jj||jjd}g}xS|jjdddD]8}|tkrVPq@|j||r@|j|q@q@Wt|dkr|jjj|dn|jjj|dS(Nii( R:RRhtactiveFormattingElementsR RRtRkR(R?R}RdtmatchingElementsR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytaddFormattingElements  c S`sWtd}xD|jjddd D])}|j|kr&|jjdPq&q&WdS(Nuddudtuliuputbodyutdutfootuthutheadutrubodyuhtmliu expected-closing-tag-but-got-eof( uddudtuliuputbodyutdutfootuthutheadutrubodyuhtml(RmR:RhRARDRi(R?tallowed_elementsR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRus  cS`s|d}|j|_|jdrb|jjdjdkrb|jjdj rb|d}n|r|jj|jj|ndS( Nudatau iupreulistingutextareai(upreulistingutextarea( RRoRR:RhRAt hasContentt#reconstructActiveFormattingElementsR(R?R}R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyt!processSpaceCharactersDropNewlines    cS`s}|ddkrdS|jj|jj|d|jjrytg|dD]}|tk^qOryt|j_ndS(Nudatau(R:R RRDR]tanyR RN(R?R}tchar((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRns  #cS`s%|jj|jj|ddS(Nudata(R:R R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`s|jjdj|S(NuinHead(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|jjdidd6t|jjdks|jjdjdkrNn`t|j_xQ|djD]?\}}||jjdj krk||jjdj |RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs1 'cS`sB|jjdddr.|jtdn|jj|dS(Nuptvariantubutton(R:telementInScopeRRR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR scS`sZ|jjdddr.|jtdn|jj|t|j_|j|_ dS(NupRubutton( R:RRRRRNRDR]R Ro(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs  cS`s|jjr)|jjdidd6nT|jjdddrW|jtdn|jj||jjd|j_dS(Nuunexpected-start-taguformunameupRubuttoni( R:t formPointerRDRiRRRRRh(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs  
cS`st|j_idgd6ddgd6ddgd6}||d}xnt|jjD]Z}|j|kr|jjjt |jdPn|j t krW|jd krWPqWqWW|jj dd d r|jjjt ddn|jj |dS( NuliudtuddunameuEndTaguaddressudivupRubutton(uaddressudivup(RNRDR]treversedR:RhRARWRqRt nameTupleRRR(R?R}t stopNamesMapt stopNamesR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs"     cS`sZ|jjdddr.|jtdn|jj||jjj|jj_dS(NupRubutton( R:RRRRRDRHRVRT(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR4scS`s|jjdddr.|jtdn|jjdjtkrx|jjdi|dd6|jjj n|jj |dS(NupRubuttoniuunexpected-start-taguname( R:RRRRhRARRDRiRR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR:s cS`s|jjd}|r|jjdidd6dd6|jtd||jjkrt|jjj|n||jjkr|jjj|qn|jj |j |dS(Nuau$unexpected-start-tag-implies-end-tagu startNameuendName( R:t!elementInActiveFormattingElementsRDRiRRRhRRR R(R?R}t afeAElement((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRBs  cS`s|jj|j|dS(N(R:R R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyROs cS`st|jj|jjdrc|jjdidd6dd6|jtd|jjn|j|dS(Nunobru$unexpected-start-tag-implies-end-tagu startNameuendName(R:R RRDRiRqRR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRSs  cS`sw|jjdrJ|jjdidd6dd6|jtd|S|jj|jj|t|j_ dS(Nubuttonu$unexpected-start-tag-implies-end-tagu startNameuendName( R:RRDRiRqRR RRNR](R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR]s  cS`s@|jj|jj||jjjtt|j_dS(N( R:R RRRtR RNRDR](R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRhs cS`s^|jjdddr.|jtdn|jjt|j_|jj|ddS(NupRubuttonuRAWTEXT( R:RRRR RNRDR]R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRns   cS`sy|jjdkrC|jjdddrC|jtdqCn|jj|t|j_|jj d|j_ dS(NuquirksupRubuttonuinTable( RDRQR:RRqRRRNR]R>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRus  cS`sG|jj|jj||jjjt|d(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs      
cS`s_|jjdrK|jj|jjdjdkrK|jjqKn|jj|dS(Nurubyi(R:RtgenerateImpliedEndTagsRhRARDRiR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs  cS`sv|jj|jj||jj|td|d<|jj||drr|jjjt |dRW(R?R}R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR!s     cS`s-|jjdr)|jtd|SdS(Nubody(R:RRR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR3scS`s|ddkr|j|_n|jj|d}|rK|jjn|jjdj|dkr|jjdi|dd6n|r|jjj }x,|j|dkr|jjj }qWndS(Nunameupreiuend-tag-too-early( RRoR:RRRhRARDRiR(R?R}tinScopeR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR9s!cS`s|jj}d|j_|dks7|jj| rT|jjdidd6nS|jj|jjd|kr|jjdidd6n|jjj|dS(Nuunexpected-end-taguformunameiuend-tag-too-early-ignored( R:RR9RRDRiRRhR(R?R}R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRGs     cS`s|ddkrd}nd}|jj|dd|s\|jjdi|dd6n|jjd|d|jjdj|dkr|jjdi|dd6n|jjj}x)|j|dkr|jjj}qWdS( NunameuliulistRuunexpected-end-tagtexcludeiuend-tag-too-early( R9R:RRDRiRRhRAR(R?R}RR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRTs ! cS`sx1tD])}|jj|r|jjPqqW|jjdj|dkrr|jjdi|dd6nx^tD]V}|jj|ry|jjj}x%|jtkr|jjj}qWPqyqyWdS(Niunameuend-tag-too-early( RR:RRRhRARDRiR(R?R}titem((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRes  ! 
cS`s3d}x&|dkr.|d7}|jj|d}| sd||jjkru|jj|j ru|j|dS||jjkr|jjdi|dd6|jjj |dS|jj|js|jjdi|dd6dS||jjdkr*|jjd i|dd6n|jjj |}d}x1|jj|D]}|j t krV|}PqVqVW|dkr|jjj}x"||kr|jjj}qW|jjj |dS|jj|d}|jjj |}|} } d} |jjj | } x| d kr9| d7} | d8} |jj| } | |jjkr|jjj | q$n| |krPn| |kr|jjj | d}n| j} | |jj|jjj | <| |jj|jjj | <| } | jr#| jj| n| j| | } q$W| jrV| jj| n|jtdkr|jj\}}|j| |n |j| |j} |j| |j| |jjj ||jjj|| |jjj ||jjj|jjj |d| q WdS(u)The much-feared adoption agency algorithmiiiunameNuadoption-agency-1.2uadoption-agency-4.4iuadoption-agency-1.3iutableutbodyutfootutheadutr(utableutbodyutfootutheadutr(R:RRhRRARRDRiRRtindexR9RRRt cloneNodeRRt appendChildRmtgetTableMisnestedNodePositiont insertBeforetreparentChildrentinsert(R?R}touterLoopCountertformattingElementtafeIndext furthestBlockRdtcommonAncestortbookmarktlastNodeRtinnerLoopCounterR"tcloneRR&((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRts   !                cS`s|jj|dr&|jjn|jjdj|dkrd|jjdi|dd6n|jj|dr|jjj}x)|j|dkr|jjj}qW|jjndS(Nunameiuend-tag-too-early( R:RRRhRARDRiRtclearActiveFormattingElements(R?R}Rd((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs!cS`s[|jjdidd6dd6|jj|jjtdd|jjjdS(Nuunexpected-end-tag-treated-asubru originalNameu br elementunewNameuStartTag(RDRiR:R RRRhR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR#s   cS`sx|jjdddD]}|j|dkr|jjd|d|jjdj|dkr|jjdi|dd6nx|jjj|krqWPq|jtkr|jjdi|dd6PqqWdS(NiunameR uunexpected-end-tag( R:RhRARRDRiRRR(R?R}R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR*s !(5R5R6RCRRRuR RnRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyResfG                                  t TextPhasec`sDeZfdZdZdZdZdZdZRS(c`sej|||tjg|_|j|j_tjd|jfg|_|j|j_dS(Nuscript( RCR RRRRt endTagScriptRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC9s 
cS`s|jj|ddS(Nudata(R:R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnAscS`sM|jjdi|jjdjd6|jjj|jj|j_tS(Nu&expected-named-closing-tag-but-got-eofiuname( RDRiR:RhRARRRWR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRuDs  cS`sdS(N((R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRKscS`s(|jjj}|jj|j_dS(N(R:RhRRDRRW(R?R}R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR4NscS`s&|jjj|jj|j_dS(N(R:RhRRDRRW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRUs(R5R6RCRnRuRR4R((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR38s     t InTablePhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d Zd ZdZdZdZdZdZRS(c `sj|||tjd|jfd|jfd|jfd|jfd|jfd|jfd |j fd|j fd|j fd|j fg |_ |j|j _tjd |jfd|jfg|_|j|j_dS(Nuhtmlucaptionucolgroupucolutbodyutfootutheadutduthutrutableustyleuscriptuinputuformubody(utbodyutfootuthead(utduthutr(ustyleuscript( ubodyucaptionucolucolgroupuhtmlutbodyutdutfootuthutheadutr(RCR RRtstartTagCaptiontstartTagColgroupt startTagColtstartTagRowGrouptstartTagImplyTbodyRtstartTagStyleScriptRRRRRt endTagTablet endTagIgnoreRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC[s$          cS`s4x-|jjdjdkr/|jjjqWdS(Niutableuhtml(utableuhtml(R:RhRAR(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytclearStackToTableContextsscS`s0|jjdjdkr,|jjdndS(Niuhtmlu eof-in-table(R:RhRARDRi(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu|scS`sH|jj}|jjd|j_||jj_|jjj|dS(Nu inTableText(RDRWR>RRo(R?R}R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRos cS`sH|jj}|jjd|j_||jj_|jjj|dS(Nu inTableText(RDRWR>RRn(R?R}R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRns cS`s3t|j_|jjdj|t|j_dS(NuinBody(R\R:tinsertFromTableRDR>RnRN(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs 
cS`sG|j|jjjt|jj||jjd|j_dS(Nu inCaption( R>R:RRtR RRDR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR6s cS`s4|j|jj||jjd|j_dS(Nu inColumnGroup(R>R:RRDR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR7s cS`s|jtdd|S(NucolgroupuStartTag(R7R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR8scS`s4|j|jj||jjd|j_dS(Nu inTableBody(R>R:RRDR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR9s cS`s|jtdd|S(NutbodyuStartTag(R9R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR:scS`sN|jjdidd6dd6|jjjtd|jjsJ|SdS(Nu$unexpected-start-tag-implies-end-tagutableu startNameuendName(RDRiRWRqRRL(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs   cS`s|jjdj|S(NuinHead(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR;scS`sqd|dkr`|ddjtdkr`|jjd|jj||jjjn |j|dS(Nutypeudatauhiddenu unexpected-hidden-input-in-table( RcR RDRiR:RRhRR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`s_|jjd|jjdkr[|jj||jjd|j_|jjjndS(Nuunexpected-form-in-tablei(RDRiR:RR9RRhR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`sQ|jjdi|dd6t|j_|jjdj|t|j_dS(Nu)unexpected-start-tag-implies-table-voodoounameuinBody(RDRiR\R:R?R>RpRN(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`s|jjdddr|jj|jjdjdkro|jjdidd6|jjdjd6nx-|jjdjdkr|jjjqrW|jjj|jjn |jjdS(NutableRiuend-tag-too-early-namedugotNameu expectedName( R:RRRhRARDRiRRY(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR<s   cS`s"|jjdi|dd6dS(Nuunexpected-end-taguname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR=scS`sQ|jjdi|dd6t|j_|jjdj|t|j_dS(Nu'unexpected-end-tag-implies-table-voodoounameuinBody(RDRiR\R:R?R>RqRN(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs 
(R5R6RCR>RuRoRnRR6R7R8R9R:RR;RRRR<R=R((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR5Ys&               tInTableTextPhasec`sVeZfdZdZdZdZdZdZdZdZ RS(c`s)j|||d|_g|_dS(N(RCR9RtcharacterTokens(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCs cS`sdjg|jD]}|d^q}tg|D]}|tk^q3ritdd6|d6}|jjdj|n|r|jj|ng|_dS(Nuudatau CharactersutypeuinTable( tjoinRAR R RRDR>RR:(R?R!RR}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytflushCharacterss)%cS`s|j|j|j_|S(N(RCRRDRW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRrs cS`s|j|j|j_tS(N(RCRRDRWR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRus cS`s(|ddkrdS|jj|dS(Nudatau(RARt(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnscS`s|jj|dS(N(RARt(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRoscS`s|j|j|j_|S(N(RCRRDRW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRp s cS`s|j|j|j_|S(N(RCRRDRW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRqs ( R5R6RCRCRrRuRnRoRpRq((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR@s     tInCaptionPhasec`sheZfdZdZdZdZdZdZdZdZ dZ d Z RS( c `sj|||tjd|jfd |jfg|_|j|j_tjd|jfd |j fd|j fg|_ |j |j _dS(Nuhtmlucaptionucolucolgrouputbodyutdutfootuthutheadutrutableubody( ucaptionucolucolgrouputbodyutdutfootuthutheadutr( ubodyucolucolgroupuhtmlutbodyutdutfootuthutheadutr( RCR RRtstartTagTableElementRRRt endTagCaptionR<R=RR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCs   cS`s|jjddd 
S(NucaptionRutable(R:R(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytignoreEndTagCaption+scS`s|jjdjdS(NuinBody(RDR>Ru(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu.scS`s|jjdj|S(NuinBody(RDR>Rn(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn1scS`s@|jj|j}|jjjtd|s<|SdS(Nucaption(RDRiRGRWRqR(R?R}t ignoreEndTag((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRE4s   cS`s|jjdj|S(NuinBody(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR<scS`s|js|jj|jjdjdkrc|jjdidd6|jjdjd6nx-|jjdjdkr|jjjqfW|jjj|jj|jj d|j_ n |jjdS(Niucaptionu$expected-one-end-tag-but-got-anotherugotNameu expectedNameuinTable( RGR:RRhRARDRiRR2R>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRF?s     cS`s@|jj|j}|jjjtd|s<|SdS(Nucaption(RDRiRGRWRqR(R?R}RH((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR<Qs   cS`s"|jjdi|dd6dS(Nuunexpected-end-taguname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR=XscS`s|jjdj|S(NuinBody(RDR>Rq(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR[s( R5R6RCRGRuRnRERRFR<R=R((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRDs        tInColumnGroupPhasec`s_eZfdZdZdZdZdZdZdZdZ dZ RS( c`sj|||tjd|jfd|jfg|_|j|j_tjd|jfd|j fg|_ |j |j _dS(Nuhtmlucolucolgroup( RCR RRR8RRRtendTagColgroupt endTagColRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCas  cS`s|jjdjdkS(Niuhtml(R:RhRA(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytignoreEndTagColgrouppscS`sJ|jjdjdkrdS|j}|jtd|sFtSdS(Niuhtmlucolgroup(R:RhRARLRJRR\(R?RH((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRuss  cS`s-|j}|jtd|s)|SdS(Nucolgroup(RLRJR(R?R}RH((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn}s 
cS`s.|jj||jjjt|dRW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRJs cS`s|jjdidd6dS(Nu no-end-tagucoluname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRKscS`s-|j}|jtd|s)|SdS(Nucolgroup(RLRJR(R?R}RH((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs ( R5R6RCRLRuRnR8RRJRKR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRI^s     tInTableBodyPhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d ZRS( c `sj|||tjd|jfd|jfd |jfd|jfg|_|j|j_ tjd|j fd |j fd|j fg|_ |j|j _ dS(Nuhtmlutrutduthucaptionucolucolgrouputbodyutfootutheadutableubody(utduth(ucaptionucolucolgrouputbodyutfootuthead(utbodyutfootuthead(ubodyucaptionucolucolgroupuhtmlutduthutr(RCR RRt startTagTrtstartTagTableCelltstartTagTableOtherRRRtendTagTableRowGroupR<R=RR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCs     cS`sPx-|jjdjdkr/|jjjqW|jjdjdkrLndS(Niutbodyutfootutheaduhtml(utbodyutfootutheaduhtml(R:RhRAR(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytclearStackToTableBodyContexts  cS`s|jjdjdS(NuinTable(RDR>Ru(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRuscS`s|jjdj|S(NuinTable(RDR>Ro(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRoscS`s|jjdj|S(NuinTable(RDR>Rn(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnscS`s4|j|jj||jjd|j_dS(NuinRow(RRR:RRDR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRNs cS`s8|jjdi|dd6|jtdd|S(Nuunexpected-cell-in-table-bodyunameutruStartTag(RDRiRNR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyROs cS`s|jjdddsH|jjdddsH|jjdddrv|j|jt|jjdj|S|jjdS(NutbodyRutableutheadutfooti( R:RRRRQRRhRARDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRPs 
cS`s|jjdj|S(NuinTable(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`sq|jj|dddrO|j|jjj|jjd|j_n|jjdi|dd6dS(NunameRutableuinTableu unexpected-end-tag-in-table-body( R:RRRRhRRDR>RWRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRQs   cS`s|jjdddsH|jjdddsH|jjdddrv|j|jt|jjdj|S|jjdS(NutbodyRutableutheadutfooti( R:RRRRQRRhRARDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR<s cS`s"|jjdi|dd6dS(Nu unexpected-end-tag-in-table-bodyuname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR=s cS`s|jjdj|S(NuinTable(RDR>Rq(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs(R5R6RCRRRuRoRnRNRORPRRQR<R=R((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRMs        t InRowPhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d Zd ZRS(c `sj|||tjd|jfd |jfd|jfg|_|j|j_tjd |j fd |j fd|j fd|j fg|_ |j|j _dS(Nuhtmlutduthucaptionucolucolgrouputbodyutfootutheadutrutableubody(utduth(ucaptionucolucolgrouputbodyutfootutheadutr(utbodyutfootuthead(ubodyucaptionucolucolgroupuhtmlutduth(RCR RRRORPRRRtendTagTrR<RQR=RR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCs     cS`s[xT|jjdjdkrV|jjdi|jjdjd6|jjjqWdS(Niutruhtmlu'unexpected-implied-end-tag-in-table-rowuname(utruhtml(R:RhRARDRiR(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytclearStackToTableRowContexts cS`s|jjddd S(NutrRutable(R:R(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytignoreEndTagTrscS`s|jjdjdS(NuinTable(RDR>Ru(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu"scS`s|jjdj|S(NuinTable(RDR>Ro(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRo%scS`s|jjdj|S(NuinTable(RDR>Rn(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn(scS`sG|j|jj||jjd|j_|jjjtdS(NuinCell( RUR:RRDR>RWRRtR 
(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRO+s cS`s-|j}|jtd|s)|SdS(Nutr(RVRTR(R?R}RH((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRP1s cS`s|jjdj|S(NuinTable(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR8scS`sP|js?|j|jjj|jjd|j_n |jjdS(Nu inTableBody( RVRUR:RhRRDR>RWRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRT;s   cS`s-|j}|jtd|s)|SdS(Nutr(RVRTR(R?R}RH((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR<Es cS`sD|jj|dddr3|jtd|S|jjdS(NunameRutableutr(R:RRTRRDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRQMscS`s"|jjdi|dd6dS(Nuunexpected-end-tag-in-table-rowuname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR=Ts cS`s|jjdj|S(NuinTable(RDR>Rq(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRXs(R5R6RCRURVRuRoRnRORPRRTR<RQR=R((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRSs           t InCellPhasec`sheZfdZdZdZdZdZdZdZdZ dZ d Z RS( c `sj|||tjd|jfd |jfg|_|j|j_tjd|jfd|j fd|j fg|_ |j |j _dS(Nuhtmlucaptionucolucolgrouputbodyutdutfootuthutheadutrubodyutable( ucaptionucolucolgrouputbodyutdutfootuthutheadutr(utduth(ubodyucaptionucolucolgroupuhtml(utableutbodyutfootutheadutr( RCR RRRPRRRtendTagTableCellR=t endTagImplyRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC]s   cS`s`|jjdddr.|jtdn.|jjdddr\|jtdndS(NutdRutableuth(R:RRXR(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyt closeCellnscS`s|jjdjdS(NuinBody(RDR>Ru(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRuuscS`s|jjdj|S(NuinBody(RDR>Rn(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnxscS`sO|jjddds0|jjdddr>|j|S|jjdS(NutdRutableuth(R:RRZRDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRP{s  
cS`s|jjdj|S(NuinBody(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|jj|dddr|jj|d|jjdj|dkr|jjdi|dd6xFtr|jjj}|j|dkrnPqnqnWn|jjj|jj |jj d|j_ n|jjdi|dd6dS(NunameRutableiuunexpected-cell-end-taguinRowuunexpected-end-tag( R:RRRhRARDRiR\RR2R>RW(R?R}R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRXs    cS`s"|jjdi|dd6dS(Nuunexpected-end-taguname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR=scS`s;|jj|dddr*|j|S|jjdS(NunameRutable(R:RRZRDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRYs cS`s|jjdj|S(NuinBody(RDR>Rq(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs( R5R6RCRZRuRnRPRRXR=RYR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRW[s       t InSelectPhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d ZRS( c`sj|||tjd|jfd|jfd|jfd|jfd |jfd|jfg|_ |j |j _ tjd|j fd|j fd|jfg|_|j|j_ dS( Nuhtmluoptionuoptgroupuselectuinputukeygenutextareauscript(uinputukeygenutextarea(RCR RRtstartTagOptiontstartTagOptgroupRRRRRRt endTagOptiontendTagOptgroupt endTagSelectRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCs       cS`s0|jjdjdkr,|jjdndS(Niuhtmlu eof-in-select(R:RhRARDRi(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRuscS`s,|ddkrdS|jj|ddS(Nudatau(R:R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnscS`s@|jjdjdkr,|jjjn|jj|dS(Niuoption(R:RhRARR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR\scS`sl|jjdjdkr,|jjjn|jjdjdkrX|jjjn|jj|dS(Niuoptionuoptgroup(R:RhRARR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR]s 
cS`s'|jjd|jtddS(Nuunexpected-select-in-selectuselect(RDRiR`R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`sC|jjd|jjdddr?|jtd|SdS(Nuunexpected-input-in-selectuselectR(RDRiR:RR`R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`s|jjdj|S(NuinHead(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s"|jjdi|dd6dS(Nuunexpected-start-tag-in-selectuname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`sJ|jjdjdkr,|jjjn|jjdidd6dS(Niuoptionuunexpected-end-tag-in-selectuname(R:RhRARRDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR^s cS`s|jjdjdkrE|jjdjdkrE|jjjn|jjdjdkrq|jjjn|jjdidd6dS(Niuoptioniuoptgroupuunexpected-end-tag-in-selectuname(R:RhRARRDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR_s cS`ss|jjdddrb|jjj}x%|jdkrQ|jjj}q-W|jjn |jjdS(NuselectR(R:RRhRRARDRYRi(R?R}R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR`s cS`s"|jjdi|dd6dS(Nuunexpected-end-tag-in-selectuname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s (R5R6RCRuRnR\R]RRRRR^R_R`R((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR[s          tInSelectInTablePhasec`sMeZfdZdZdZdZdZdZdZRS(c `sqj|||tjd |jfg|_|j|j_tjd |jfg|_|j |j_dS( Nucaptionutableutbodyutfootutheadutrutduth(ucaptionutableutbodyutfootutheadutrutduth(ucaptionutableutbodyutfootutheadutrutduth( RCR RRRRRR<RR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC scS`s|jjdjdS(NuinSelect(RDR>Ru(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu scS`s|jjdj|S(NuinSelect(RDR>Rn(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn 
scS`s5|jjdi|dd6|jtd|S(Nu5unexpected-table-element-start-tag-in-select-in-tableunameuselect(RDRiRR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR! scS`s|jjdj|S(NuinSelect(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR& scS`sU|jjdi|dd6|jj|dddrQ|jtd|SdS(Nu3unexpected-table-element-end-tag-in-select-in-tableunameRutableuselect(RDRiR:RRR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR<) scS`s|jjdj|S(NuinSelect(RDR>Rq(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR/ s( R5R6RCRuRnRRR<R((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRa s     tInForeignContentPhasec-`seZedddddddddd d d d d ddddddddddddddddddd d!d"d#d$d%d&d'd(d)d*d+g,Zfd,Zd-Zfd.Zd/Zd0ZRS(1ububigu blockquoteubodyubrucenterucodeuddudivudludtuemuembeduh1uh2uh3uh4uh5uh6uheaduhruiuimguliulistingumenuumetaunobruolupupreurubyususmalluspanustrongustrikeusubusuputableuttuuuuluvarc`sj|||dS(N(RC(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC< scS`s+i$dd6dd6dd6dd6d d 6d d 6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d!d"6d#d$6d%d&6d'd(6d)d*6d+d,6d-d.6d/d06d1d26d3d46d5d66d7d86d9d:6d;d<6d=d>6d?d@6dAdB6dCdD6dEdF6dGdH6}|dI|kr'||dI|dIl s(RDR]R RNRn(R?R}(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnh s   cS`s|jjd}|d|jks\|ddkrt|djtdddg@r|jjdi|dd6xm|jjdj|jjkr|jj |jjd r|jj |jjd r|jjj q}W|S|jt d kr|jj |n3|jt d krG|j||jj|n|jj||j|d <|jj||d r|jjj t|d RCRRR9RaRlRq(R?R}t nodeIndexRR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRq s(!  
( R5R6RmReRCRdRnRpRq((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRb2 s ) tAfterBodyPhasec`sVeZfdZdZdZdZdZdZdZdZ RS(c`sqj|||tjd|jfg|_|j|j_tjd|jfg|_|j |j_dS(Nuhtml( RCR RRRRRRRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC s cS`sdS(N((R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu scS`s!|jj||jjddS(Ni(R:RRh(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRr scS`s*|jjd|jjd|j_|S(Nuunexpected-char-after-bodyuinBody(RDRiR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn scS`s|jjdj|S(NuinBody(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR scS`s8|jjdi|dd6|jjd|j_|S(Nuunexpected-start-tag-after-bodyunameuinBody(RDRiR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s cS`s9|jjr|jjdn|jjd|j_dS(Nu'unexpected-end-tag-after-body-innerhtmluafterAfterBody(RDRLRiR>RW(R?RA((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s cS`s8|jjdi|dd6|jjd|j_|S(Nuunexpected-end-tag-after-bodyunameuinBody(RDRiR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s ( R5R6RCRuRrRnRRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRi s      tInFramesetPhasec`s_eZfdZdZdZdZdZdZdZdZ dZ RS( c`sj|||tjd|jfd|jfd|jfd|jfg|_|j|j_ tjd|j fg|_ |j |j _ dS(Nuhtmluframesetuframeunoframes( RCR RRRt startTagFrametstartTagNoframesRRRtendTagFramesetRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC s   cS`s0|jjdjdkr,|jjdndS(Niuhtmlueof-in-frameset(R:RhRARDRi(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu scS`s|jjddS(Nuunexpected-char-in-frameset(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn scS`s|jj|dS(N(R:R(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR 
scS`s$|jj||jjjdS(N(R:RRhR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRk scS`s|jjdj|S(NuinBody(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRl scS`s"|jjdi|dd6dS(Nu unexpected-start-tag-in-framesetuname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s cS`s|jjdjdkr,|jjdn|jjj|jj r{|jjdjdkr{|jjd|j_ndS(Niuhtmlu)unexpected-frameset-in-frameset-innerhtmluframesetu afterFrameset( R:RhRARDRiRRLR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRm s  cS`s"|jjdi|dd6dS(Nuunexpected-end-tag-in-framesetuname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s ( R5R6RCRuRnRRkRlRRmR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRj s       tAfterFramesetPhasec`sMeZfdZdZdZdZdZdZdZRS(c`s}j|||tjd|jfd|jfg|_|j|j_tjd|jfg|_ |j |j _dS(Nuhtmlunoframes( RCR RRRlRRRRRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC s cS`sdS(N((R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu scS`s|jjddS(Nuunexpected-char-after-frameset(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn! scS`s|jjdj|S(NuinHead(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRl$ scS`s"|jjdi|dd6dS(Nu#unexpected-start-tag-after-framesetuname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR' s cS`s|jjd|j_dS(NuafterAfterFrameset(RDR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR+ scS`s"|jjdi|dd6dS(Nu!unexpected-end-tag-after-framesetuname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR. 
s ( R5R6RCRuRnRlRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn s     tAfterAfterBodyPhasec`sVeZfdZdZdZdZdZdZdZdZ RS(c`sDj|||tjd|jfg|_|j|j_dS(Nuhtml(RCR RRRRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC3 scS`sdS(N((R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu; scS`s|jj||jjdS(N(R:RR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRr> scS`s|jjdj|S(NuinBody(RDR>Ro(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRoA scS`s*|jjd|jjd|j_|S(Nuexpected-eof-but-got-charuinBody(RDRiR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnD scS`s|jjdj|S(NuinBody(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRI scS`s8|jjdi|dd6|jjd|j_|S(Nuexpected-eof-but-got-start-tagunameuinBody(RDRiR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRL s cS`s8|jjdi|dd6|jjd|j_|S(Nuexpected-eof-but-got-end-tagunameuinBody(RDRiR>RW(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRqR s ( R5R6RCRuRrRoRnRRRq((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRo2 s      tAfterAfterFramesetPhasec`s_eZfdZdZdZdZdZdZdZdZ dZ RS( c`sPj|||tjd|jfd|jfg|_|j|j_dS(Nuhtmlunoframes(RCR RRtstartTagNoFramesRRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCY s  cS`sdS(N((R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRub scS`s|jj||jjdS(N(R:RR(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRre scS`s|jjdj|S(NuinBody(RDR>Ro(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRoh scS`s|jjddS(Nuexpected-eof-but-got-char(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnk scS`s|jjdj|S(NuinBody(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn 
scS`s|jjdj|S(NuinHead(RDR>Rp(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRqq scS`s"|jjdi|dd6dS(Nuexpected-eof-but-got-start-taguname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRt s cS`s"|jjdi|dd6dS(Nuexpected-eof-but-got-end-taguname(RDRi(R?R}((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRqx s ( R5R6RCRuRrRoRnRRqRRq((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRpX s       uinitialu beforeHtmlu beforeHeaduinHeaduinHeadNoscriptu afterHeaduinBodyutextuinTableu inTableTextu inCaptionu inColumnGroupu inTableBodyuinRowuinCelluinSelectuinSelectInTableuinForeignContentu afterBodyu inFramesetu afterFramesetuafterAfterBodyuafterAfterFrameset(R(R@RPRRRRRRRRR3R5R@RDRIRMRSRWR[RaRbRiRjRnRoRp((RsD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR=_sh  %)#.g@C!-GBbYLd's/9%&%c`s}tstjr,t|dt@}nt|dt@}|rytfd|djD|d s(RR tPY27RRmRR((R}Rctneeds_adjustment((RcsD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s uEndTagcC`s9|dkri}nit|d6|d6|d6|d6S(Nutypeunameudatau selfClosing(R9R(RAR,RbR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s  RcB`seZdZRS(uError in parsed document(R5R6R(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s(4t __future__RRRtpip._vendor.sixRRRR*t collectionsRt ImportErrortpip._vendor.ordereddicttRRR ttreebuilders.baseR R t constantsR R RRRRRRRRRRRRRRRR\RR&R7tobjectRtmemoizeR=RR9RNRt ExceptionR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyts>  j  (C  PK.e[dsX<<html5lib/_ihatexml.pycnu[ abc@`sZddlmZmZmZddlZddlZddlmZdZdZ dZ dZ d Z d j ee gZd j ee d d d e e gZd j ed gZejdZejdZdZdZeddZdZdZdZdZejdZejdZejdZdefdYZ dS(i(tabsolute_importtdivisiontunicode_literalsNi(tDataLossWarningu^ [#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | [#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] | [#x014A-#x017E] 
| [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] | [#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 | [#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] | [#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] | [#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] | [#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] | [#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 | [#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] | [#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] | [#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D | [#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] | [#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] | [#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] | [#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] | [#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] | [#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] | [#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 | [#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] | [#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] | [#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] | [#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] | [#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] | [#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] | [#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] | [#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] | [#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] | [#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] | [#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A | #x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 | #x0EA7 | 
[#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] | #x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] | [#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] | [#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C | #x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 | [#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] | [#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] | [#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 | [#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] | [#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B | #x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE | [#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] | [#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 | [#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] | [#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]u*[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]u [#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] | [#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 | [#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] | [#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] | #x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] | [#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] | [#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 | #x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] | [#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC | [#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] | #x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] | [#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] | [#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] | [#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | 
[#x0C82-#x0C83] | [#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] | [#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] | #x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 | [#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] | #x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] | [#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] | [#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] | #x3099 | #x309Au  [#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] | [#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] | [#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] | [#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]u} #x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 | #[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]u | u.u-u_u#x([\d|A-F]{4,4})u'\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]cC`s#g|jdD]}|j^q}g}x|D]}t}xttfD]}|j|}|dk rN|jg|jD]}t |^qt |ddkr|dd|dt_((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pytcoerceCharacterss  cC`s|}xHtj|D]7}tjdt|j|}|j||}qW|jr|jddkrtjdt|jd|jd}n|S(NuCoercing non-XML pubidu'iu!Pubid cannot contain single quote( tnonPubidCharRegexptfindallR6R7RtgetReplacementCharacterR'R1tfind(R3R>t dataOutputR*t replacement((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pyt coercePubidsc C`s|d}|d}tj|}|rKtjdt|j|}n|}|}ttj|}x?|D]7}tjdt|j|} |j || }qsW||S(NiiuCoercing non-XML name( tnonXmlNameFirstBMPRegexpR R6R7RRFtsettnonXmlNameBMPRegexpRER'( R3R9t nameFirsttnameResttmtnameFirstOutputtnameRestOutputt replaceCharsR*RI((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pyR8s   cC`s2||jkr|j|}n|j|}|S(N(R2t escapeChar(R3R*RI((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pyRFscC`sBx;t|jj|D]!}|j||j|}qW|S(N(RLtreplacementRegexpRER't 
unescapeChar(R3R9R((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pyt fromXmlNamescC`s!dt|}||j|<|S(NuU%05X(RR2(R3R*RI((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pyRTs cC`stt|ddS(Nii(R"R%(R3tcharcode((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pyRVsN(t__name__t __module__tretcompileRURRR4R R;R<R?RCRJR8RFRWRTRV(((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pyR+s"       (!t __future__RRRR[R6t constantsRtbaseChart ideographictcombiningCharactertdigittextenderR#tletterR9RNR\RRRRR%RR R$R R!RMRKRDtobjectR+(((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pyts2  0     PK.e[Qllhtml5lib/_inputstream.pycnu[ abc!@`sddlmZmZmZddlmZmZddlmZm Z ddl Z ddl Z ddl m Z ddlmZmZmZmZddlmZdd lmZdd lmZydd lmZWnek reZnXegeD]Zejd ^qZegeD]Zejd ^q"ZegeD]Zejd ^qJZeed dgBZ dZ!ej"re!ddkre!j#ddkst$e j%e!d e&ddZ'ne j%e!Z'e(dddddddddddddd d!d"d#d$d%d&d'd(d)d*d+d,d-d.d/d0d1d2g Z)e j%d3Z*iZ+d4e,fd5YZ-d6Z.d7e,fd8YZ/d9e/fd:YZ0d;e1fd<YZ2d=e,fd>YZ3d?e,fd@YZ4dAZ5dS(Bi(tabsolute_importtdivisiontunicode_literals(t text_typet binary_type(t http_clientturllibN(t webencodingsi(tEOFtspaceCharacterst asciiLetterstasciiUppercase(tReparseException(t_utils(tStringIO(tBytesIOuasciit>tt|j||krd|t|j|8}|d7}q'W||g|_dS(Nii(t_bufferedBytestAssertionErrorRRR(RRtoffsetti((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytseekLscC`sp|js|j|S|jdt|jkr_|jdt|jdkr_|j|S|j|SdS(Niii(Rt _readStreamRRt_readFromBuffer(Rtbytes((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytreadUs     cC`s&tg|jD]}t|^q S(N(tsumRR(Rtitem((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyR^scC`sL|jj|}|jj||jdcd7 Normalized stream from source for use by html5lib. source can be either a file-object, local filename or a string. The optional encoding parameter must be a string that indicates the encoding. 
If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) u􏿿iiuutf-8ucertainN( R tsupports_lone_surrogatestNonetreportCharacterErrorsRtcharacterErrorsUCS4tcharacterErrorsUCS2tnewLinestlookupEncodingt charEncodingt openStreamt dataStreamtreset(RR?((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs    cC`sCd|_d|_d|_g|_d|_d|_d|_dS(Nui(Rt chunkSizet chunkOffsetterrorst prevNumLinest prevNumColsRFt_bufferedCharacter(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyROs      cC`s(t|dr|}n t|}|S(uvProduces a file object from source. source can be either a file object, local filename or a string. uread(R:R(RR?R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRMs  cC`st|j}|jdd|}|j|}|jdd|}|dkr\|j|}n||d}||fS(Nu iii(RtcountRStrfindRT(RRRtnLinest positionLinet lastLinePostpositionColumn((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt _positions   cC`s&|j|j\}}|d|fS(u:Returns (line, col) of the current position in the stream.i(R\RQ(Rtlinetcol((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRscC`sL|j|jkr%|js%tSn|j}|j|}|d|_|S(uo Read one character from the stream or queue if available. Return EOF when EOF is reached. 
i(RQRPt readChunkRR(RRQtchar((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyR`s    cC`sO|dkr|j}n|j|j\|_|_d|_d|_d|_|jj |}|j r|j |}d|_ n |st St |dkrt |d}|dksd|kodknr|d|_ |d }qn|jr|j|n|jdd }|jd d }||_t ||_tS( Nuiiii iiu u u (RFt_defaultChunkSizeR\RPRSRTRRQRNR$RUR9RtordRGtreplacetTrue(RRPR(tlastv((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyR_s0        (   cC`s:x3tttj|D]}|jjdqWdS(Nuinvalid-codepoint(trangeRtinvalid_unicode_retfindallRRR'(RR(t_((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRH%s"cC`st}xtj|D]}|r(qnt|j}|j}tj|||d!rtj|||d!}|t kr|j j dnt }q|dkr|dkr|t |dkr|j j dqt}|j j dqWdS(Niuinvalid-codepointiii(R9RgtfinditerRbtgrouptstartR tisSurrogatePairtsurrogatePairToCodepointtnon_bmp_invalid_codepointsRRR'RdR(RR(tskiptmatcht codepointRtchar_val((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRI)s    c C`s}yt||f}Wntk rx&|D]}t|dks+tq+Wdjg|D]}dt|^qZ}|sd|}ntjd|}t||fcB`sbeZdZd d d d dedZdZdZedZdZ dZ dZ RS( uProvides a unicode stream of characters to the HTMLTokenizer. This class takes care of character encoding and removing or replacing incorrect byte-sequences and also provides column and line tracking. u windows-1252cC`s|j||_tj||jd|_d|_||_||_||_||_ ||_ |j ||_ |j ddk st|jdS(uInitialises the HTMLInputStream. HTMLInputStream(source, [encoding]) -> Normalized stream from source for use by html5lib. source can be either a file-object, local filename or a string. The optional encoding parameter must be a string that indicates the encoding. 
If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) iidiN(RMt rawStreamR=Rt numBytesMetatnumBytesChardettoverride_encodingttransport_encodingtsame_origin_parent_encodingtlikely_encodingtdefault_encodingtdetermineEncodingRLRFRRO(RR?RRRRRt useChardet((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs       cC`s3|jdjj|jd|_tj|dS(Niureplace(RLt codec_infot streamreaderRRNR=RO(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyROs"cC`sUt|dr|}n t|}y|j|jWnt|}nX|S(uvProduces a file object from source. source can be either a file object, local filename or a string. uread(R:RR RR(RR?R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRMs  cC`s!|jdf}|ddk r&|St|jdf}|ddk rO|St|jdf}|ddk rx|S|jdf}|ddk r|St|jdf}|ddk r|djjd r|St|j df}|ddk r|S|ryddl m }Wnt k r4qXg}|}x[|j s|jj|j}t|tszt|sPn|j||j|qGW|jt|jd}|jjd|dk r|dfSnt|jdf}|ddk r|StddfS(Nucertainiu tentativeuutf-16(tUniversalDetectoruencodingu windows-1252(t detectBOMRFRKRRtdetectEncodingMetaRtnamet startswithRtchardet.universaldetectorRt ImportErrortdoneRR$RR4R#RR'tfeedtclosetresultR R(RtchardetRLRtbufferstdetectorRtencoding((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRsR'       cC`s|jddkstt|}|dkr5dS|jdkretd}|dk stnr||jdkr|jddf|_nF|jjd|df|_|jtd|jd|fdS( Niucertainuutf-16beuutf-16leuutf-8iuEncoding changed from %s to %s(uutf-16beuutf-16le( RLRRKRFRRR ROR (Rt newEncoding((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytchangeEncodings    cC`sidtj6dtj6dtj6dtj6dtj6}|jjd}t|t s_t |j |d }d}|s|j |}d}|s|j |d }d}qn|r|jj |t |S|jj d d Sd S( uAttempts to detect at BOM at the start of the stream. 
If an encoding can be determined from the BOM return the name of the encoding otherwise return Noneuutf-8uutf-16leuutf-16beuutf-32leuutf-32beiiiiN(tcodecstBOM_UTF8t BOM_UTF16_LEt BOM_UTF16_BEt BOM_UTF32_LEt BOM_UTF32_BERR$R4R#RtgetR RKRF(RtbomDicttstringRR ((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs&   cC`s|jj|j}t|ts*tt|}|jjd|j}|dk r||j dkr|t d}n|S(u9Report the encoding declared by the meta element iuutf-16beuutf-16leuutf-8N(uutf-16beuutf-16le( RR$RR4R#RtEncodingParserR t getEncodingRFRRK(RRtparserR((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyR9s  N( R1R2R3RFRdRRORMRRRR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyR>s(   >  "t EncodingBytescB`seZdZdZdZdZdZdZdZdZ dZ e e e Z d Z e e Zed Zd Zd Zd ZRS(uString-like object with an associated position and various extra methods If the position is ever greater than the string length then an exception is raisedcC`s+t|tsttj||jS(N(R4R#Rt__new__tlower(Rtvalue((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRLscC`s d|_dS(Ni(R\(RR((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRPscC`s|S(N((R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt__iter__TscC`sS|jd}|_|t|kr/tn|dkrDtn|||d!S(Nii(R\Rt StopIterationR<(Rtp((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt__next__Ws    cC`s |jS(N(R(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytnext_scC`sY|j}|t|kr$tn|dkr9tn|d|_}|||d!S(Nii(R\RRR<(RR((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytpreviouscs    cC`s+|jt|krtn||_dS(N(R\RR(RR((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt setPositionls cC`s<|jt|krtn|jdkr4|jSdSdS(Ni(R\RRRF(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt getPositionqs  
cC`s||j|jd!S(Ni(R(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytgetCurrentByte{scC`sc|j}xJ|t|krU|||d!}||krH||_|S|d7}q W||_dS(uSkip past a list of charactersiN(RRR\RF(RR{RR|((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRps    cC`sc|j}xJ|t|krU|||d!}||krH||_|S|d7}q W||_dS(Ni(RRR\RF(RR{RR|((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt skipUntils    cC`sQ|j}|||t|!}|j|}|rM|jt|7_n|S(uLook for a sequence of bytes at the start of a string. If the bytes are found return True and advance the position to the byte after the match. Otherwise return False and leave the position alone(RRR(RR#RR(R,((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt matchBytess  cC`sh||jj|}|dkr^|jdkr=d|_n|j|t|d7_tStdS(uLook for the next sequence of bytes matching a given sequence. If a match is found advance the position to the last byte of the matchiiiN(RtfindR\RRdR(RR#t newPosition((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytjumpTos  (R1R2R3RRRRRRRRtpropertyRRt currentBytetspaceCharactersBytesRpRRR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRHs           RcB`s_eZdZdZdZdZdZdZdZdZ dZ d Z RS( u?Mini parser for detecting character encoding from meta elementscC`st||_d|_dS(u3string - the data to work on for encoding detectionN(RR(RFR(RR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRscC`sd|jfd|jfd|jfd|jfd|jfd|jff}xv|jD]k}t}xR|D]J\}}|jj|rky|}PWqtk rt }PqXqkqkW|sXPqXqXW|j S(Ns(R(R(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRscC`sK|jjtkrtSt}d}x"trF|j}|dkrGtS|ddkr|ddk}|rC|dk rC||_tSq%|ddkr|d}t|}|dk rC||_tSq%|ddkr%t t |d}|j }|dk rCt|}|dk r@|r4||_tS|}q@qCq%q%WdS(Nis http-equivis content-typetcharsettcontent( R(RRRdR9RFt getAttributeRRKtContentAttrParserRtparse(Rt hasPragmatpendingEncodingtattrttentativeEncodingtcodect 
contentParser((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs:             cC`s |jtS(N(thandlePossibleTagR9(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRscC`st|j|jtS(N(RR(RRd(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs cC`s|j}|jtkr9|r5|j|jntS|jt}|dkra|jn+|j}x|dk r|j}qpWtS(NR( R(RtasciiLettersBytesRRRdRtspacesAngleBracketsRRF(RtendTagR(R|R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs      cC`s|jjdS(NR(R(R(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRscC`s|j}|jttdgB}|dksIt|dksIt|d krYdSg}g}xtr |dkr|rPnz|tkr|j}Pn^|d krdj|dfS|t kr|j |j n|dkrdS|j |t |}qhW|dkr7|j dj|dfSt ||j}|d kr|}xtrt |}||krt |dj|dj|fS|t kr|j |j qb|j |qbWn^|dkrdj|dfS|t kr|j |j n|dkr-dS|j |x}trt |}|tkrwdj|dj|fS|t kr|j |j q=|dkrdS|j |q=WdS( u_Return a name,value pair for the next attribute in the stream, if one is found, or Nonet/iRt=R)t't"N(RN(RR(RR(R(RpRt frozensetRFRRRdR*tasciiUppercaseBytesR'RRRR(RR(R|tattrNamet attrValuet quoteChar((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRsh $                           ( R1R2R3RRRRRRRRR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs    $    RcB`seZdZdZRS(cC`s"t|tst||_dS(N(R4R#RR((RR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRfscC`s:y!|jjd|jjd7_|jj|jjdksHdS|jjd7_|jj|jjdkr|jj}|jjd7_|jj}|jj|r|j||jj!SdSnP|jj}y(|jjt|j||jj!SWntk r|j|SXWntk r5dSXdS(NRiRRR(RR( R(RRRpRRFRRR(Rt quoteMarkt oldPosition((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRjs.       
(R1R2RR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRes cC`swt|tr:y|jd}Wq:tk r6dSXn|dk roytj|SWqstk rkdSXndSdS(u{Return the python codec name corresponding to an encoding or None if the string doesn't correspond to a valid encoding.uasciiN(R4RtdecodetUnicodeDecodeErrorRFRtlookuptAttributeError(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRKs   (6t __future__RRRtpip._vendor.sixRRtpip._vendor.six.movesRRRRvt pip._vendorRt constantsRR R R R R)R tioRRRRR&tencodeRRRRtinvalid_unicode_no_surrogateRERVRRwtevalRgtsetRotascii_punctuation_reRttobjectRRDR=R>R#RRRRK(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytsR  "  ((( + J h'PK.e[eNJJhtml5lib/__init__.pycnu[ abc@`sdZddlmZmZmZddlmZmZmZddl m Z ddl m Z ddl mZdd d d d d gZdZdS(uM HTML parsing library based on the WHATWG "HTML5" specification. The parser is designed to be compatible with existing HTML found in the wild and implements well-defined error recovery that is largely compatible with modern desktop web browsers. Example usage: import html5lib f = open("my_document.html") tree = html5lib.parse(f) i(tabsolute_importtdivisiontunicode_literalsi(t HTMLParsertparset parseFragment(tgetTreeBuilder(t getTreeWalker(t serializeu HTMLParseruparseu parseFragmentugetTreeBuilderu getTreeWalkeru serializeu1.0b10N(t__doc__t __future__RRRt html5parserRRRt treebuildersRt treewalkersRt serializerRt__all__t __version__(((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/__init__.pyt s  PK.e[ǻ4a7a7html5lib/serializer.pynu[from __future__ import absolute_import, division, unicode_literals from pip._vendor.six import text_type import re from codecs import register_error, xmlcharrefreplace_errors from .constants import voidElements, booleanAttributes, spaceCharacters from .constants import rcdataElements, entities, xmlEntities from . 
import treewalkers, _utils from xml.sax.saxutils import escape _quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`" _quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]") _quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n" "\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15" "\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" "\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000" "\u2001\u2002\u2003\u2004\u2005\u2006\u2007" "\u2008\u2009\u200a\u2028\u2029\u202f\u205f" "\u3000]") _encode_entity_map = {} _is_ucs4 = len("\U0010FFFF") == 1 for k, v in list(entities.items()): # skip multi-character entities if ((_is_ucs4 and len(v) > 1) or (not _is_ucs4 and len(v) > 2)): continue if v != "&": if len(v) == 2: v = _utils.surrogatePairToCodepoint(v) else: v = ord(v) if v not in _encode_entity_map or k.islower(): # prefer < over < and similarly for &, >, etc. _encode_entity_map[v] = k def htmlentityreplace_errors(exc): if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)): res = [] codepoints = [] skip = False for i, c in enumerate(exc.object[exc.start:exc.end]): if skip: skip = False continue index = i + exc.start if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]): codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2]) skip = True else: codepoint = ord(c) codepoints.append(codepoint) for cp in codepoints: e = _encode_entity_map.get(cp) if e: res.append("&") res.append(e) if not e.endswith(";"): res.append(";") else: res.append("&#x%s;" % (hex(cp)[2:])) return ("".join(res), exc.end) else: return xmlcharrefreplace_errors(exc) register_error("htmlentityreplace", htmlentityreplace_errors) def serialize(input, tree="etree", encoding=None, **serializer_opts): # XXX: Should we cache this? 
walker = treewalkers.getTreeWalker(tree) s = HTMLSerializer(**serializer_opts) return s.render(walker(input), encoding) class HTMLSerializer(object): # attribute quoting options quote_attr_values = "legacy" # be secure by default quote_char = '"' use_best_quote_char = True # tag syntax options omit_optional_tags = True minimize_boolean_attributes = True use_trailing_solidus = False space_before_trailing_solidus = True # escaping options escape_lt_in_attrs = False escape_rcdata = False resolve_entities = True # miscellaneous options alphabetical_attributes = False inject_meta_charset = True strip_whitespace = False sanitize = False options = ("quote_attr_values", "quote_char", "use_best_quote_char", "omit_optional_tags", "minimize_boolean_attributes", "use_trailing_solidus", "space_before_trailing_solidus", "escape_lt_in_attrs", "escape_rcdata", "resolve_entities", "alphabetical_attributes", "inject_meta_charset", "strip_whitespace", "sanitize") def __init__(self, **kwargs): """Initialize HTMLSerializer. Keyword options (default given first unless specified) include: inject_meta_charset=True|False Whether it insert a meta element to define the character set of the document. quote_attr_values="legacy"|"spec"|"always" Whether to quote attribute values that don't require quoting per legacy browser behaviour, when required by the standard, or always. quote_char=u'"'|u"'" Use given quote character for attribute quoting. Default is to use double quote unless attribute value contains a double quote, in which case single quotes are used instead. escape_lt_in_attrs=False|True Whether to escape < in attribute values. escape_rcdata=False|True Whether to escape characters that need to be escaped within normal elements within rcdata elements such as style. resolve_entities=True|False Whether to resolve named character entities that appear in the source tree. The XML predefined entities < > & " ' are unaffected by this setting. 
strip_whitespace=False|True Whether to remove semantically meaningless whitespace. (This compresses all whitespace to a single space except within pre.) minimize_boolean_attributes=True|False Shortens boolean attributes to give just the attribute value, for example becomes . use_trailing_solidus=False|True Includes a close-tag slash at the end of the start tag of void elements (empty elements whose end tag is forbidden). E.g.
. space_before_trailing_solidus=True|False Places a space immediately before the closing slash in a tag using a trailing solidus. E.g.
. Requires use_trailing_solidus. sanitize=False|True Strip all unsafe or unknown constructs from output. See `html5lib user documentation`_ omit_optional_tags=True|False Omit start/end tags that are optional. alphabetical_attributes=False|True Reorder attributes to be in alphabetical order. .. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation """ unexpected_args = frozenset(kwargs) - frozenset(self.options) if len(unexpected_args) > 0: raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args))) if 'quote_char' in kwargs: self.use_best_quote_char = False for attr in self.options: setattr(self, attr, kwargs.get(attr, getattr(self, attr))) self.errors = [] self.strict = False def encode(self, string): assert(isinstance(string, text_type)) if self.encoding: return string.encode(self.encoding, "htmlentityreplace") else: return string def encodeStrict(self, string): assert(isinstance(string, text_type)) if self.encoding: return string.encode(self.encoding, "strict") else: return string def serialize(self, treewalker, encoding=None): # pylint:disable=too-many-nested-blocks self.encoding = encoding in_cdata = False self.errors = [] if encoding and self.inject_meta_charset: from .filters.inject_meta_charset import Filter treewalker = Filter(treewalker, encoding) # Alphabetical attributes is here under the assumption that none of # the later filters add or change order of attributes; it needs to be # before the sanitizer so escaped elements come out correctly if self.alphabetical_attributes: from .filters.alphabeticalattributes import Filter treewalker = Filter(treewalker) # WhitespaceFilter should be used before OptionalTagFilter # for maximum efficiently of this latter filter if self.strip_whitespace: from .filters.whitespace import Filter treewalker = Filter(treewalker) if self.sanitize: from .filters.sanitizer import Filter treewalker = Filter(treewalker) if self.omit_optional_tags: from 
.filters.optionaltags import Filter treewalker = Filter(treewalker) for token in treewalker: type = token["type"] if type == "Doctype": doctype = "= 0: if token["systemId"].find("'") >= 0: self.serializeError("System identifer contains both single and double quote characters") quote_char = "'" else: quote_char = '"' doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char) doctype += ">" yield self.encodeStrict(doctype) elif type in ("Characters", "SpaceCharacters"): if type == "SpaceCharacters" or in_cdata: if in_cdata and token["data"].find("= 0: self.serializeError("Unexpected ") elif type == "EndTag": name = token["name"] if name in rcdataElements: in_cdata = False elif in_cdata: self.serializeError("Unexpected child element of a CDATA element") yield self.encodeStrict("" % name) elif type == "Comment": data = token["data"] if data.find("--") >= 0: self.serializeError("Comment contains --") yield self.encodeStrict("" % token["data"]) elif type == "Entity": name = token["name"] key = name + ";" if key not in entities: self.serializeError("Entity %s not recognized" % name) if self.resolve_entities and key not in xmlEntities: data = entities[key] else: data = "&%s;" % name yield self.encodeStrict(data) else: self.serializeError(token["data"]) def render(self, treewalker, encoding=None): if encoding: return b"".join(list(self.serialize(treewalker, encoding))) else: return "".join(list(self.serialize(treewalker))) def serializeError(self, data="XXX ERROR MESSAGE NEEDED"): # XXX The idea is to make data mandatory. 
self.errors.append(data) if self.strict: raise SerializeError class SerializeError(Exception): """Error in serialized tree""" pass PK.e[&Xhtml5lib/_utils.pynu[from __future__ import absolute_import, division, unicode_literals import sys from types import ModuleType from pip._vendor.six import text_type try: import xml.etree.cElementTree as default_etree except ImportError: import xml.etree.ElementTree as default_etree __all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair", "surrogatePairToCodepoint", "moduleFactoryFactory", "supports_lone_surrogates", "PY27"] PY27 = sys.version_info[0] == 2 and sys.version_info[1] >= 7 # Platforms not supporting lone surrogates (\uD800-\uDFFF) should be # caught by the below test. In general this would be any platform # using UTF-16 as its encoding of unicode strings, such as # Jython. This is because UTF-16 itself is based on the use of such # surrogates, and there is no mechanism to further escape such # escapes. try: _x = eval('"\\uD800"') # pylint:disable=eval-used if not isinstance(_x, text_type): # We need this with u"" because of http://bugs.jython.org/issue2039 _x = eval('u"\\uD800"') # pylint:disable=eval-used assert isinstance(_x, text_type) except: # pylint:disable=bare-except supports_lone_surrogates = False else: supports_lone_surrogates = True class MethodDispatcher(dict): """Dict with 2 special properties: On initiation, keys that are lists, sets or tuples are converted to multiple keys so accessing any one of the items in the original list-like object returns the matching value md = MethodDispatcher({("foo", "bar"):"baz"}) md["foo"] == "baz" A default value which can be set through the default attribute. """ def __init__(self, items=()): # Using _dictEntries instead of directly assigning to self is about # twice as fast. Please do careful performance testing before changing # anything here. 
_dictEntries = [] for name, value in items: if isinstance(name, (list, tuple, frozenset, set)): for item in name: _dictEntries.append((item, value)) else: _dictEntries.append((name, value)) dict.__init__(self, _dictEntries) assert len(self) == len(_dictEntries) self.default = None def __getitem__(self, key): return dict.get(self, key, self.default) # Some utility functions to deal with weirdness around UCS2 vs UCS4 # python builds def isSurrogatePair(data): return (len(data) == 2 and ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF) def surrogatePairToCodepoint(data): char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 + (ord(data[1]) - 0xDC00)) return char_val # Module Factory Factory (no, this isn't Java, I know) # Here to stop this being duplicated all over the place. def moduleFactoryFactory(factory): moduleCache = {} def moduleFactory(baseModule, *args, **kwargs): if isinstance(ModuleType.__name__, type("")): name = "_%s_factory" % baseModule.__name__ else: name = b"_%s_factory" % baseModule.__name__ kwargs_tuple = tuple(kwargs.items()) try: return moduleCache[name][args][kwargs_tuple] except KeyError: mod = ModuleType(name) objs = factory(baseModule, *args, **kwargs) mod.__dict__.update(objs) if "name" not in moduleCache: moduleCache[name] = {} if "args" not in moduleCache[name]: moduleCache[name][args] = {} if "kwargs" not in moduleCache[name][args]: moduleCache[name][args][kwargs_tuple] = {} moduleCache[name][args][kwargs_tuple] = mod return mod return moduleFactory def memoize(func): cache = {} def wrapped(*args, **kwargs): key = (tuple(args), tuple(kwargs.items())) if key not in cache: cache[key] = func(*args, **kwargs) return cache[key] return wrapped PK.e[i*J.J.html5lib/serializer.pycnu[ abc@`sddlmZmZmZddlmZddlZddlmZm Z ddl m Z m Z m Z ddl mZmZmZddlmZmZdd lmZd je d Zejd ed Zejd edZiZeddkZxeejD]\Z Z!er<ee!dkse r[ee!dkr[qne!dkree!dkrej"e!Z!n e#e!Z!e!ekse j$re ee!`u[u]u_  /`  ᠎᠏           

   ]u􏿿iu&c C`st|ttfrqg}g}t}xt|j|j|j!D]\}}|rbt}qDn||j}tj |j|t |j|dg!rtj |j||d!}t }n t |}|j|qDWxz|D]r}tj|} | r<|jd|j| | jdsW|jdqWq|jdt|dqWdj||jfSt|SdS(Niu&u;u&#x%s;u(t isinstancetUnicodeEncodeErrortUnicodeTranslateErrortFalset enumeratetobjecttstarttendR tisSurrogatePairtmintsurrogatePairToCodepointtTruetordtappendt_encode_entity_maptgettendswiththextjoinR( texctrest codepointstskiptitctindext codepointtcpte((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pythtmlentityreplace_errors*s0) ,     uhtmlentityreplaceuetreecK`s1tj|}t|}|j|||S(N(R t getTreeWalkertHTMLSerializertrender(tinputttreetencodingtserializer_optstwalkerts((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyt serializeJs R.cB`seZdZdZeZeZeZeZ eZ eZ eZ eZ eZeZeZeZdZdZdZdZddZddZddZRS(ulegacyu"uquote_attr_valuesu quote_charuuse_best_quote_charuomit_optional_tagsuminimize_boolean_attributesuuse_trailing_solidususpace_before_trailing_solidusuescape_lt_in_attrsu escape_rcdatauresolve_entitiesualphabetical_attributesuinject_meta_charsetustrip_whitespaceusanitizec K`st|t|j}t|dkrJtdtt|nd|krbt|_nx6|jD]+}t|||j |t ||qlWg|_ t|_ dS(u6 Initialize HTMLSerializer. Keyword options (default given first unless specified) include: inject_meta_charset=True|False Whether it insert a meta element to define the character set of the document. quote_attr_values="legacy"|"spec"|"always" Whether to quote attribute values that don't require quoting per legacy browser behaviour, when required by the standard, or always. quote_char=u'"'|u"'" Use given quote character for attribute quoting. Default is to use double quote unless attribute value contains a double quote, in which case single quotes are used instead. escape_lt_in_attrs=False|True Whether to escape < in attribute values. escape_rcdata=False|True Whether to escape characters that need to be escaped within normal elements within rcdata elements such as style. 
resolve_entities=True|False Whether to resolve named character entities that appear in the source tree. The XML predefined entities < > & " ' are unaffected by this setting. strip_whitespace=False|True Whether to remove semantically meaningless whitespace. (This compresses all whitespace to a single space except within pre.) minimize_boolean_attributes=True|False Shortens boolean attributes to give just the attribute value, for example becomes . use_trailing_solidus=False|True Includes a close-tag slash at the end of the start tag of void elements (empty elements whose end tag is forbidden). E.g.
. space_before_trailing_solidus=True|False Places a space immediately before the closing slash in a tag using a trailing solidus. E.g.
. Requires use_trailing_solidus. sanitize=False|True Strip all unsafe or unknown constructs from output. See `html5lib user documentation`_ omit_optional_tags=True|False Omit start/end tags that are optional. alphabetical_attributes=False|True Reorder attributes to be in alphabetical order. .. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation iu2__init__() got an unexpected keyword argument '%s'u quote_charN( t frozensettoptionstlent TypeErrortnexttiterRtuse_best_quote_chartsetattrRtgetattrterrorststrict(tselftkwargstunexpected_argstattr((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyt__init__ps.  ) cC`s9t|tst|jr1|j|jdS|SdS(Nuhtmlentityreplace(RRtAssertionErrorR2tencode(RBtstring((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyRHs cC`s9t|tst|jr1|j|jdS|SdS(Nustrict(RRRGR2RH(RBRI((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyt encodeStricts cc`sn||_t}g|_|rI|jrIddlm}|||}n|jrqddlm}||}n|jrddl m}||}n|j rddl m}||}n|j rddl m}||}nx~|D]v}|d}|dkrd|d}|dr9|d|d7}n|d rP|d 7}n|d r|d jd d kr|d jd d kr|jdnd }nd }|d||d |f7}n|d7}|j|Vq|d3kra|dks|rF|r1|djdd kr1|jdn|j|dVqf|jt|dVq|d4kr.|d} |jd| V| tkr|j rt}n|r|jdnx|djD] \\} } } | } | }|jdV|j| V|j sI| tj| tkr| tjdtkr|jdV|jdksxt|d krt}nZ|jdkrtj|dk }n3|jdkrt j|dk }n t!d |j"d!d"}|j#r |j"d#d$}n|r|j$}|j%rhd |krDd |krDd }qhd |krhd |krhd }qhn|d kr|j"d d%}n|j"d d&}|j|V|j|V|j|Vq|j|VqqW| t&kr|j'r|j(r |jd'Vq|jd(Vn|jdVq|d)kr|d} | tkrYt}n|ro|jdn|jd*| Vq|d+kr|d}|jd,d kr|jd-n|jd.|dVq|d/krU|d} | d0}|t)kr|jd1| n|j*r:|t+kr:t)|}n d2| }|j|Vq|j|dqWdS(5Ni(tFilterutypeuDoctypeu u CharactersuSpaceCharactersudatauuCommentu--uComment contains --u uEntityu;uEntity %s not recognizedu&%s;(u 
CharactersuSpaceCharacters(uStartTaguEmptyTag(,R2RR@tinject_meta_charsettfilters.inject_meta_charsetRKtalphabetical_attributestfilters.alphabeticalattributeststrip_whitespacetfilters.whitespacetsanitizetfilters.sanitizertomit_optional_tagstfilters.optionaltagstfindtserializeErrorRJRHRR t escape_rcdataRtitemstminimize_boolean_attributesRRttupletquote_attr_valuesR9t_quoteAttributeSpectsearchtNonet_quoteAttributeLegacyt ValueErrortreplacetescape_lt_in_attrst quote_charR=Rtuse_trailing_solidustspace_before_trailing_solidusR tresolve_entitiesR (RBt treewalkerR2tin_cdataRKttokenttypetdoctypeRdtnamet_t attr_namet attr_valuetktvt quote_attrtdatatkey((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyR6s                   # !                     cC`sE|r%djt|j||Sdjt|j|SdS(Ntu(R!tlistR6(RBRhR2((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyR/?suXXX ERROR MESSAGE NEEDEDcC`s&|jj||jr"tndS(N(R@RRAtSerializeError(RBRt((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyRWEs (uquote_attr_valuesu quote_charuuse_best_quote_charuomit_optional_tagsuminimize_boolean_attributesuuse_trailing_solidususpace_before_trailing_solidusuescape_lt_in_attrsu escape_rcdatauresolve_entitiesualphabetical_attributesuinject_meta_charsetustrip_whitespaceusanitizeN(t__name__t __module__R\RdRR=RTRZRReRfRcRXRgRNRLRPRRR8RFRHRJR_R6R/RW(((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyR.Qs4 8   RxcB`seZdZRS(uError in serialized tree(RyRzt__doc__(((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyRxLs(,t __future__RRRtpip._vendor.sixRtretcodecsRRt constantsRRRR R R RvR R txml.sax.saxutilsRR!t_quoteAttributeSpecCharstcompileR]R`RR9t_is_ucs4RwRYRqRrRRtislowerR,R_R6RR.t ExceptionRx(((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyts8       PK.e[Fj$+$+html5lib/_tokenizer.pynu[from __future__ import absolute_import, division, unicode_literals from pip._vendor.six import unichr as chr from 
collections import deque from .constants import spaceCharacters from .constants import entities from .constants import asciiLetters, asciiUpper2Lower from .constants import digits, hexDigits, EOF from .constants import tokenTypes, tagTokenTypes from .constants import replacementCharacters from ._inputstream import HTMLInputStream from ._trie import Trie entitiesTrie = Trie(entities) class HTMLTokenizer(object): """ This class takes care of tokenizing HTML. * self.currentToken Holds the token that is currently being processed. * self.state Holds a reference to the method to be invoked... XXX * self.stream Points to HTMLInputStream object. """ def __init__(self, stream, parser=None, **kwargs): self.stream = HTMLInputStream(stream, **kwargs) self.parser = parser # Setup the initial tokenizer state self.escapeFlag = False self.lastFourChars = [] self.state = self.dataState self.escape = False # The current token being created self.currentToken = None super(HTMLTokenizer, self).__init__() def __iter__(self): """ This is where the magic happens. We do our usually processing through the states and when we have a token to return we yield the token which pauses processing until the next token is requested. """ self.tokenQueue = deque([]) # Start processing. When EOF is reached self.state will return False # instead of True and the loop will terminate. while self.state(): while self.stream.errors: yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)} while self.tokenQueue: yield self.tokenQueue.popleft() def consumeNumberEntity(self, isHex): """This function returns either U+FFFD or the character based on the decimal or hexadecimal representation. It also discards ";" if present. If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked. """ allowed = digits radix = 10 if isHex: allowed = hexDigits radix = 16 charStack = [] # Consume all the characters that are in range while making sure we # don't hit an EOF. 
c = self.stream.char() while c in allowed and c is not EOF: charStack.append(c) c = self.stream.char() # Convert the set of characters consumed to an int. charAsInt = int("".join(charStack), radix) # Certain characters get replaced with others if charAsInt in replacementCharacters: char = replacementCharacters[charAsInt] self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) elif ((0xD800 <= charAsInt <= 0xDFFF) or (charAsInt > 0x10FFFF)): char = "\uFFFD" self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) else: # Should speed up this check somehow (e.g. move the set to a constant) if ((0x0001 <= charAsInt <= 0x0008) or (0x000E <= charAsInt <= 0x001F) or (0x007F <= charAsInt <= 0x009F) or (0xFDD0 <= charAsInt <= 0xFDEF) or charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF, 0x10FFFE, 0x10FFFF])): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) try: # Try/except needed as UCS-2 Python builds' unichar only works # within the BMP. char = chr(charAsInt) except ValueError: v = charAsInt - 0x10000 char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF)) # Discard the ; if present. Otherwise, put it back on the queue and # invoke parseError on parser. 
if c != ";": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "numeric-entity-without-semicolon"}) self.stream.unget(c) return char def consumeEntity(self, allowedChar=None, fromAttribute=False): # Initialise to the default output for when no entity is matched output = "&" charStack = [self.stream.char()] if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or (allowedChar is not None and allowedChar == charStack[0])): self.stream.unget(charStack[0]) elif charStack[0] == "#": # Read the next character to see if it's hex or decimal hex = False charStack.append(self.stream.char()) if charStack[-1] in ("x", "X"): hex = True charStack.append(self.stream.char()) # charStack[-1] should be the first digit if (hex and charStack[-1] in hexDigits) \ or (not hex and charStack[-1] in digits): # At least one digit found, so consume the whole number self.stream.unget(charStack[-1]) output = self.consumeNumberEntity(hex) else: # No digits found self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-numeric-entity"}) self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) else: # At this point in the process might have named entity. Entities # are stored in the global variable "entities". # # Consume characters and compare to these to a substring of the # entity names in the list until the substring no longer matches. while (charStack[-1] is not EOF): if not entitiesTrie.has_keys_with_prefix("".join(charStack)): break charStack.append(self.stream.char()) # At this point we have a string that starts with some characters # that may match an entity # Try to find the longest entity the string will match to take care # of ¬i for instance. 
try: entityName = entitiesTrie.longest_prefix("".join(charStack[:-1])) entityLength = len(entityName) except KeyError: entityName = None if entityName is not None: if entityName[-1] != ";": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "named-entity-without-semicolon"}) if (entityName[-1] != ";" and fromAttribute and (charStack[entityLength] in asciiLetters or charStack[entityLength] in digits or charStack[entityLength] == "=")): self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) else: output = entities[entityName] self.stream.unget(charStack.pop()) output += "".join(charStack[entityLength:]) else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-named-entity"}) self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) if fromAttribute: self.currentToken["data"][-1][1] += output else: if output in spaceCharacters: tokenType = "SpaceCharacters" else: tokenType = "Characters" self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output}) def processEntityInAttribute(self, allowedChar): """This method replaces the need for "entityInAttributeValueState". """ self.consumeEntity(allowedChar=allowedChar, fromAttribute=True) def emitCurrentToken(self): """This method is a generic handler for emitting the tags. It also sets the state to "data" because that's what's needed after a token has been emitted. """ token = self.currentToken # Add token to the queue to be yielded if (token["type"] in tagTokenTypes): token["name"] = token["name"].translate(asciiUpper2Lower) if token["type"] == tokenTypes["EndTag"]: if token["data"]: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "attributes-in-end-tag"}) if token["selfClosing"]: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "self-closing-flag-on-end-tag"}) self.tokenQueue.append(token) self.state = self.dataState # Below are the various tokenizer states worked out. 
def dataState(self): data = self.stream.char() if data == "&": self.state = self.entityDataState elif data == "<": self.state = self.tagOpenState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\u0000"}) elif data is EOF: # Tokenization ends. return False elif data in spaceCharacters: # Directly after emitting a token you switch back to the "data # state". At that point spaceCharacters are important so they are # emitted separately. self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": data + self.stream.charsUntil(spaceCharacters, True)}) # No need to update lastFourChars here, since the first space will # have already been appended to lastFourChars and will have broken # any sequences else: chars = self.stream.charsUntil(("&", "<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def entityDataState(self): self.consumeEntity() self.state = self.dataState return True def rcdataState(self): data = self.stream.char() if data == "&": self.state = self.characterReferenceInRcdata elif data == "<": self.state = self.rcdataLessThanSignState elif data == EOF: # Tokenization ends. return False elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data in spaceCharacters: # Directly after emitting a token you switch back to the "data # state". At that point spaceCharacters are important so they are # emitted separately. 
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": data + self.stream.charsUntil(spaceCharacters, True)}) # No need to update lastFourChars here, since the first space will # have already been appended to lastFourChars and will have broken # any sequences else: chars = self.stream.charsUntil(("&", "<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def characterReferenceInRcdata(self): self.consumeEntity() self.state = self.rcdataState return True def rawtextState(self): data = self.stream.char() if data == "<": self.state = self.rawtextLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: # Tokenization ends. return False else: chars = self.stream.charsUntil(("<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def scriptDataState(self): data = self.stream.char() if data == "<": self.state = self.scriptDataLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: # Tokenization ends. return False else: chars = self.stream.charsUntil(("<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def plaintextState(self): data = self.stream.char() if data == EOF: # Tokenization ends. 
return False elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + self.stream.charsUntil("\u0000")}) return True def tagOpenState(self): data = self.stream.char() if data == "!": self.state = self.markupDeclarationOpenState elif data == "/": self.state = self.closeTagOpenState elif data in asciiLetters: self.currentToken = {"type": tokenTypes["StartTag"], "name": data, "data": [], "selfClosing": False, "selfClosingAcknowledged": False} self.state = self.tagNameState elif data == ">": # XXX In theory it could be something besides a tag name. But # do we really care? self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name-but-got-right-bracket"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"}) self.state = self.dataState elif data == "?": # XXX In theory it could be something besides a tag name. But # do we really care? 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name-but-got-question-mark"}) self.stream.unget(data) self.state = self.bogusCommentState else: # XXX self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.dataState return True def closeTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.currentToken = {"type": tokenTypes["EndTag"], "name": data, "data": [], "selfClosing": False} self.state = self.tagNameState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-right-bracket"}) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-eof"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "": self.emitCurrentToken() elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-tag-name"}) self.state = self.dataState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] += "\uFFFD" else: self.currentToken["name"] += data # (Don't use charsUntil here, because tag names are # very short and it's faster to not do anything fancy) return True def rcdataLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.rcdataEndTagOpenState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.rcdataState return True def rcdataEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.rcdataEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], 
"data": "" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) self.state = self.scriptDataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataEscapedState elif data == EOF: self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataEscapedState return True def scriptDataEscapedLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.scriptDataEscapedEndTagOpenState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data}) self.temporaryBuffer = data self.state = self.scriptDataDoubleEscapeStartState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.scriptDataEscapedState return True def 
scriptDataEscapedEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer = data self.state = self.scriptDataEscapedEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ""))): self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) if self.temporaryBuffer.lower() == "script": self.state = self.scriptDataDoubleEscapedState else: self.state = self.scriptDataEscapedState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.temporaryBuffer += data else: self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataDoubleEscapedState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataDoubleEscapedDashState elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) return True def scriptDataDoubleEscapedDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataDoubleEscapedDashDashState elif data == "<": 
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataDoubleEscapedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapedDashDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == ">": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) self.state = self.scriptDataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataDoubleEscapedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapedLessThanSignState(self): data = self.stream.char() if data == "/": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"}) self.temporaryBuffer = "" self.state = self.scriptDataDoubleEscapeEndState else: self.stream.unget(data) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapeEndState(self): data = self.stream.char() if data 
in (spaceCharacters | frozenset(("/", ">"))): self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) if self.temporaryBuffer.lower() == "script": self.state = self.scriptDataEscapedState else: self.state = self.scriptDataDoubleEscapedState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.temporaryBuffer += data else: self.stream.unget(data) self.state = self.scriptDataDoubleEscapedState return True def beforeAttributeNameState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data in asciiLetters: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == ">": self.emitCurrentToken() elif data == "/": self.state = self.selfClosingStartTagState elif data in ("'", '"', "=", "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-in-attribute-name"}) self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"].append(["\uFFFD", ""]) self.state = self.attributeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-name-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState return True def attributeNameState(self): data = self.stream.char() leavingThisState = True emitToken = False if data == "=": self.state = self.beforeAttributeValueState elif data in asciiLetters: self.currentToken["data"][-1][0] += data +\ self.stream.charsUntil(asciiLetters, True) leavingThisState = False elif data == ">": # XXX If we emit here the attributes are converted to a dict # without being checked and when the code below runs we error # because data is a dict not a list emitToken = True elif data in 
spaceCharacters: self.state = self.afterAttributeNameState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][0] += "\uFFFD" leavingThisState = False elif data in ("'", '"', "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-in-attribute-name"}) self.currentToken["data"][-1][0] += data leavingThisState = False elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-name"}) self.state = self.dataState else: self.currentToken["data"][-1][0] += data leavingThisState = False if leavingThisState: # Attributes are not dropped at this stage. That happens when the # start tag token is emitted so values can still be safely appended # to attributes, but we do want to report the parse error in time. self.currentToken["data"][-1][0] = ( self.currentToken["data"][-1][0].translate(asciiUpper2Lower)) for name, _ in self.currentToken["data"][:-1]: if self.currentToken["data"][-1][0] == name: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "duplicate-attribute"}) break # XXX Fix for above XXX if emitToken: self.emitCurrentToken() return True def afterAttributeNameState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data == "=": self.state = self.beforeAttributeValueState elif data == ">": self.emitCurrentToken() elif data in asciiLetters: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"].append(["\uFFFD", ""]) self.state = self.attributeNameState elif data in ("'", '"', "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": 
"invalid-character-after-attribute-name"}) self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-end-of-tag-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState return True def beforeAttributeValueState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data == "\"": self.state = self.attributeValueDoubleQuotedState elif data == "&": self.state = self.attributeValueUnQuotedState self.stream.unget(data) elif data == "'": self.state = self.attributeValueSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-value-but-got-right-bracket"}) self.emitCurrentToken() elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" self.state = self.attributeValueUnQuotedState elif data in ("=", "<", "`"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "equals-in-unquoted-attribute-value"}) self.currentToken["data"][-1][1] += data self.state = self.attributeValueUnQuotedState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-value-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data self.state = self.attributeValueUnQuotedState return True def attributeValueDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterAttributeValueState elif data == "&": self.processEntityInAttribute('"') elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": 
"eof-in-attribute-value-double-quote"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data +\ self.stream.charsUntil(("\"", "&", "\u0000")) return True def attributeValueSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterAttributeValueState elif data == "&": self.processEntityInAttribute("'") elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-single-quote"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data +\ self.stream.charsUntil(("'", "&", "\u0000")) return True def attributeValueUnQuotedState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == "&": self.processEntityInAttribute(">") elif data == ">": self.emitCurrentToken() elif data in ('"', "'", "=", "<", "`"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-in-unquoted-attribute-value"}) self.currentToken["data"][-1][1] += data elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-no-quotes"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data + self.stream.charsUntil( frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters) return True def afterAttributeValueState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == ">": self.emitCurrentToken() elif data == "/": self.state = self.selfClosingStartTagState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": 
"unexpected-EOF-after-attribute-value"}) self.stream.unget(data) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-after-attribute-value"}) self.stream.unget(data) self.state = self.beforeAttributeNameState return True def selfClosingStartTagState(self): data = self.stream.char() if data == ">": self.currentToken["selfClosing"] = True self.emitCurrentToken() elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-EOF-after-solidus-in-tag"}) self.stream.unget(data) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-after-solidus-in-tag"}) self.stream.unget(data) self.state = self.beforeAttributeNameState return True def bogusCommentState(self): # Make a new comment token and give it as value all the characters # until the first > or EOF (charsUntil checks for EOF automatically) # and emit it. data = self.stream.charsUntil(">") data = data.replace("\u0000", "\uFFFD") self.tokenQueue.append( {"type": tokenTypes["Comment"], "data": data}) # Eat the character directly after the bogus comment which is either a # ">" or an EOF. 
self.stream.char() self.state = self.dataState return True def markupDeclarationOpenState(self): charStack = [self.stream.char()] if charStack[-1] == "-": charStack.append(self.stream.char()) if charStack[-1] == "-": self.currentToken = {"type": tokenTypes["Comment"], "data": ""} self.state = self.commentStartState return True elif charStack[-1] in ('d', 'D'): matched = True for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'), ('y', 'Y'), ('p', 'P'), ('e', 'E')): charStack.append(self.stream.char()) if charStack[-1] not in expected: matched = False break if matched: self.currentToken = {"type": tokenTypes["Doctype"], "name": "", "publicId": None, "systemId": None, "correct": True} self.state = self.doctypeState return True elif (charStack[-1] == "[" and self.parser is not None and self.parser.tree.openElements and self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace): matched = True for expected in ["C", "D", "A", "T", "A", "["]: charStack.append(self.stream.char()) if charStack[-1] != expected: matched = False break if matched: self.state = self.cdataSectionState return True self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-dashes-or-doctype"}) while charStack: self.stream.unget(charStack.pop()) self.state = self.bogusCommentState return True def commentStartState(self): data = self.stream.char() if data == "-": self.state = self.commentStartDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "incorrect-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += data self.state = self.commentState 
return True def commentStartDashState(self): data = self.stream.char() if data == "-": self.state = self.commentEndState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "-\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "incorrect-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "-" + data self.state = self.commentState return True def commentState(self): data = self.stream.char() if data == "-": self.state = self.commentEndDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += data + \ self.stream.charsUntil(("-", "\u0000")) return True def commentEndDashState(self): data = self.stream.char() if data == "-": self.state = self.commentEndState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "-\uFFFD" self.state = self.commentState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-end-dash"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "-" + data self.state = self.commentState return True def commentEndState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": 
tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "--\uFFFD" self.state = self.commentState elif data == "!": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-bang-after-double-dash-in-comment"}) self.state = self.commentEndBangState elif data == "-": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-dash-after-double-dash-in-comment"}) self.currentToken["data"] += data elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-double-dash"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: # XXX self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-comment"}) self.currentToken["data"] += "--" + data self.state = self.commentState return True def commentEndBangState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "-": self.currentToken["data"] += "--!" self.state = self.commentEndDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "--!\uFFFD" self.state = self.commentState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-end-bang-state"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "--!" 
+ data self.state = self.commentState return True def doctypeState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-eof"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "need-space-after-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypeNameState return True def beforeDoctypeNameState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-right-bracket"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] = "\uFFFD" self.state = self.doctypeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-eof"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["name"] = data self.state = self.doctypeNameState return True def doctypeNameState(self): data = self.stream.char() if data in spaceCharacters: self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.state = self.afterDoctypeNameState elif data == ">": self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] += "\uFFFD" self.state = self.doctypeNameState elif data is EOF: 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype-name"}) self.currentToken["correct"] = False self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["name"] += data return True def afterDoctypeNameState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.currentToken["correct"] = False self.stream.unget(data) self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: if data in ("p", "P"): matched = True for expected in (("u", "U"), ("b", "B"), ("l", "L"), ("i", "I"), ("c", "C")): data = self.stream.char() if data not in expected: matched = False break if matched: self.state = self.afterDoctypePublicKeywordState return True elif data in ("s", "S"): matched = True for expected in (("y", "Y"), ("s", "S"), ("t", "T"), ("e", "E"), ("m", "M")): data = self.stream.char() if data not in expected: matched = False break if matched: self.state = self.afterDoctypeSystemKeywordState return True # All the characters read before the current 'data' will be # [a-zA-Z], so they're garbage in the bogus doctype and can be # discarded; only the latest character might be '>' or EOF # and needs to be ungetted self.stream.unget(data) self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-space-or-right-bracket-in-doctype", "datavars": {"data": data}}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def afterDoctypePublicKeywordState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypePublicIdentifierState elif data in ("'", '"'): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": 
"unexpected-char-in-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypePublicIdentifierState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.stream.unget(data) self.state = self.beforeDoctypePublicIdentifierState return True def beforeDoctypePublicIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == "\"": self.currentToken["publicId"] = "" self.state = self.doctypePublicIdentifierDoubleQuotedState elif data == "'": self.currentToken["publicId"] = "" self.state = self.doctypePublicIdentifierSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def doctypePublicIdentifierDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterDoctypePublicIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["publicId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) 
self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["publicId"] += data return True def doctypePublicIdentifierSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterDoctypePublicIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["publicId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["publicId"] += data return True def afterDoctypePublicIdentifierState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.betweenDoctypePublicAndSystemIdentifiersState elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == '"': self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = 
self.bogusDoctypeState return True def betweenDoctypePublicAndSystemIdentifiersState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == '"': self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def afterDoctypeSystemKeywordState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypeSystemIdentifierState elif data in ("'", '"'): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypeSystemIdentifierState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.stream.unget(data) self.state = self.beforeDoctypeSystemIdentifierState return True def beforeDoctypeSystemIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == "\"": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False 
self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def doctypeSystemIdentifierDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterDoctypeSystemIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["systemId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["systemId"] += data return True def doctypeSystemIdentifierSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterDoctypeSystemIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["systemId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: 
self.currentToken["systemId"] += data return True def afterDoctypeSystemIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.state = self.bogusDoctypeState return True def bogusDoctypeState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: # XXX EMIT self.stream.unget(data) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: pass return True def cdataSectionState(self): data = [] while True: data.append(self.stream.charsUntil("]")) data.append(self.stream.charsUntil(">")) char = self.stream.char() if char == EOF: break else: assert char == ">" if data[-1][-2:] == "]]": data[-1] = data[-1][:-2] break else: data.append(char) data = "".join(data) # pylint:disable=redefined-variable-type # Deal with null here rather than in the parser nullCount = data.count("\u0000") if nullCount > 0: for _ in range(nullCount): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) data = data.replace("\u0000", "\uFFFD") if data: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.dataState return True PK.e["1LLhtml5lib/treewalkers/etree.pynu[from __future__ import absolute_import, division, unicode_literals try: from collections import OrderedDict except ImportError: try: from ordereddict import OrderedDict except ImportError: OrderedDict = dict import re from pip._vendor.six import string_types from . 
import base from .._utils import moduleFactoryFactory tag_regexp = re.compile("{([^}]*)}(.*)") def getETreeBuilder(ElementTreeImplementation): ElementTree = ElementTreeImplementation ElementTreeCommentType = ElementTree.Comment("asd").tag class TreeWalker(base.NonRecursiveTreeWalker): # pylint:disable=unused-variable """Given the particular ElementTree representation, this implementation, to avoid using recursion, returns "nodes" as tuples with the following content: 1. The current element 2. The index of the element relative to its parent 3. A stack of ancestor elements 4. A flag "text", "tail" or None to indicate if the current node is a text node; either the text or tail of the current element (1) """ def getNodeDetails(self, node): if isinstance(node, tuple): # It might be the root Element elt, _, _, flag = node if flag in ("text", "tail"): return base.TEXT, getattr(elt, flag) else: node = elt if not(hasattr(node, "tag")): node = node.getroot() if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"): return (base.DOCUMENT,) elif node.tag == "": return (base.DOCTYPE, node.text, node.get("publicId"), node.get("systemId")) elif node.tag == ElementTreeCommentType: return base.COMMENT, node.text else: assert isinstance(node.tag, string_types), type(node.tag) # This is assumed to be an ordinary element match = tag_regexp.match(node.tag) if match: namespace, tag = match.groups() else: namespace = None tag = node.tag attrs = OrderedDict() for name, value in list(node.attrib.items()): match = tag_regexp.match(name) if match: attrs[(match.group(1), match.group(2))] = value else: attrs[(None, name)] = value return (base.ELEMENT, namespace, tag, attrs, len(node) or node.text) def getFirstChild(self, node): if isinstance(node, tuple): element, key, parents, flag = node else: element, key, parents, flag = node, None, [], None if flag in ("text", "tail"): return None else: if element.text: return element, key, parents, "text" elif len(element): parents.append(element) return 
element[0], 0, parents, None else: return None def getNextSibling(self, node): if isinstance(node, tuple): element, key, parents, flag = node else: return None if flag == "text": if len(element): parents.append(element) return element[0], 0, parents, None else: return None else: if element.tail and flag != "tail": return element, key, parents, "tail" elif key < len(parents[-1]) - 1: return parents[-1][key + 1], key + 1, parents, None else: return None def getParentNode(self, node): if isinstance(node, tuple): element, key, parents, flag = node else: return None if flag == "text": if not parents: return element else: return element, key, parents, None else: parent = parents.pop() if not parents: return parent else: assert list(parents[-1]).count(parent) == 1 return parent, list(parents[-1]).index(parent), parents, None return locals() getETreeModule = moduleFactoryFactory(getETreeBuilder) PK.e[q[html5lib/treewalkers/dom.pyonu[ abc@`sYddlmZmZmZddlmZddlmZdejfdYZ dS(i(tabsolute_importtdivisiontunicode_literals(tNodei(tbaset TreeWalkercB`s,eZdZdZdZdZRS(cC`sX|jtjkr.tj|j|j|jfS|jtjtj fkrYtj |j fS|jtj kri}xgt |jjD]P}|j|}|jr|j||j|jfsPK.e[M#KKhtml5lib/treewalkers/base.pynu[from __future__ import absolute_import, division, unicode_literals from xml.dom import Node from ..constants import namespaces, voidElements, spaceCharacters __all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN", "TreeWalker", "NonRecursiveTreeWalker"] DOCUMENT = Node.DOCUMENT_NODE DOCTYPE = Node.DOCUMENT_TYPE_NODE TEXT = Node.TEXT_NODE ELEMENT = Node.ELEMENT_NODE COMMENT = Node.COMMENT_NODE ENTITY = Node.ENTITY_NODE UNKNOWN = "<#UNKNOWN#>" spaceCharacters = "".join(spaceCharacters) class TreeWalker(object): def __init__(self, tree): self.tree = tree def __iter__(self): raise NotImplementedError def error(self, msg): return {"type": "SerializeError", "data": msg} def emptyTag(self, namespace, name, attrs, hasChildren=False): yield {"type": "EmptyTag", "name": name, "namespace": 
namespace, "data": attrs} if hasChildren: yield self.error("Void element has children") def startTag(self, namespace, name, attrs): return {"type": "StartTag", "name": name, "namespace": namespace, "data": attrs} def endTag(self, namespace, name): return {"type": "EndTag", "name": name, "namespace": namespace} def text(self, data): data = data middle = data.lstrip(spaceCharacters) left = data[:len(data) - len(middle)] if left: yield {"type": "SpaceCharacters", "data": left} data = middle middle = data.rstrip(spaceCharacters) right = data[len(middle):] if middle: yield {"type": "Characters", "data": middle} if right: yield {"type": "SpaceCharacters", "data": right} def comment(self, data): return {"type": "Comment", "data": data} def doctype(self, name, publicId=None, systemId=None): return {"type": "Doctype", "name": name, "publicId": publicId, "systemId": systemId} def entity(self, name): return {"type": "Entity", "name": name} def unknown(self, nodeType): return self.error("Unknown node type: " + nodeType) class NonRecursiveTreeWalker(TreeWalker): def getNodeDetails(self, node): raise NotImplementedError def getFirstChild(self, node): raise NotImplementedError def getNextSibling(self, node): raise NotImplementedError def getParentNode(self, node): raise NotImplementedError def __iter__(self): currentNode = self.tree while currentNode is not None: details = self.getNodeDetails(currentNode) type, details = details[0], details[1:] hasChildren = False if type == DOCTYPE: yield self.doctype(*details) elif type == TEXT: for token in self.text(*details): yield token elif type == ELEMENT: namespace, name, attributes, hasChildren = details if (not namespace or namespace == namespaces["html"]) and name in voidElements: for token in self.emptyTag(namespace, name, attributes, hasChildren): yield token hasChildren = False else: yield self.startTag(namespace, name, attributes) elif type == COMMENT: yield self.comment(details[0]) elif type == ENTITY: yield 
self.entity(details[0]) elif type == DOCUMENT: hasChildren = True else: yield self.unknown(details[0]) if hasChildren: firstChild = self.getFirstChild(currentNode) else: firstChild = None if firstChild is not None: currentNode = firstChild else: while currentNode is not None: details = self.getNodeDetails(currentNode) type, details = details[0], details[1:] if type == ELEMENT: namespace, name, attributes, hasChildren = details if (namespace and namespace != namespaces["html"]) or name not in voidElements: yield self.endTag(namespace, name) if self.tree is currentNode: currentNode = None break nextSibling = self.getNextSibling(currentNode) if nextSibling is not None: currentNode = nextSibling break else: currentNode = self.getParentNode(currentNode) PK.e[ G"html5lib/treewalkers/etree_lxml.pynu[from __future__ import absolute_import, division, unicode_literals from pip._vendor.six import text_type from lxml import etree from ..treebuilders.etree import tag_regexp from . import base from .. 
import _ihatexml def ensure_str(s): if s is None: return None elif isinstance(s, text_type): return s else: return s.decode("ascii", "strict") class Root(object): def __init__(self, et): self.elementtree = et self.children = [] try: if et.docinfo.internalDTD: self.children.append(Doctype(self, ensure_str(et.docinfo.root_name), ensure_str(et.docinfo.public_id), ensure_str(et.docinfo.system_url))) except AttributeError: pass try: node = et.getroot() except AttributeError: node = et while node.getprevious() is not None: node = node.getprevious() while node is not None: self.children.append(node) node = node.getnext() self.text = None self.tail = None def __getitem__(self, key): return self.children[key] def getnext(self): return None def __len__(self): return 1 class Doctype(object): def __init__(self, root_node, name, public_id, system_id): self.root_node = root_node self.name = name self.public_id = public_id self.system_id = system_id self.text = None self.tail = None def getnext(self): return self.root_node.children[1] class FragmentRoot(Root): def __init__(self, children): self.children = [FragmentWrapper(self, child) for child in children] self.text = self.tail = None def getnext(self): return None class FragmentWrapper(object): def __init__(self, fragment_root, obj): self.root_node = fragment_root self.obj = obj if hasattr(self.obj, 'text'): self.text = ensure_str(self.obj.text) else: self.text = None if hasattr(self.obj, 'tail'): self.tail = ensure_str(self.obj.tail) else: self.tail = None def __getattr__(self, name): return getattr(self.obj, name) def getnext(self): siblings = self.root_node.children idx = siblings.index(self) if idx < len(siblings) - 1: return siblings[idx + 1] else: return None def __getitem__(self, key): return self.obj[key] def __bool__(self): return bool(self.obj) def getparent(self): return None def __str__(self): return str(self.obj) def __unicode__(self): return str(self.obj) def __len__(self): return len(self.obj) class 
TreeWalker(base.NonRecursiveTreeWalker): def __init__(self, tree): # pylint:disable=redefined-variable-type if isinstance(tree, list): self.fragmentChildren = set(tree) tree = FragmentRoot(tree) else: self.fragmentChildren = set() tree = Root(tree) base.NonRecursiveTreeWalker.__init__(self, tree) self.filter = _ihatexml.InfosetFilter() def getNodeDetails(self, node): if isinstance(node, tuple): # Text node node, key = node assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key return base.TEXT, ensure_str(getattr(node, key)) elif isinstance(node, Root): return (base.DOCUMENT,) elif isinstance(node, Doctype): return base.DOCTYPE, node.name, node.public_id, node.system_id elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"): return base.TEXT, ensure_str(node.obj) elif node.tag == etree.Comment: return base.COMMENT, ensure_str(node.text) elif node.tag == etree.Entity: return base.ENTITY, ensure_str(node.text)[1:-1] # strip &; else: # This is assumed to be an ordinary element match = tag_regexp.match(ensure_str(node.tag)) if match: namespace, tag = match.groups() else: namespace = None tag = ensure_str(node.tag) attrs = {} for name, value in list(node.attrib.items()): name = ensure_str(name) value = ensure_str(value) match = tag_regexp.match(name) if match: attrs[(match.group(1), match.group(2))] = value else: attrs[(None, name)] = value return (base.ELEMENT, namespace, self.filter.fromXmlName(tag), attrs, len(node) > 0 or node.text) def getFirstChild(self, node): assert not isinstance(node, tuple), "Text nodes have no children" assert len(node) or node.text, "Node has no children" if node.text: return (node, "text") else: return node[0] def getNextSibling(self, node): if isinstance(node, tuple): # Text node node, key = node assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key if key == "text": # XXX: we cannot use a "bool(node) and node[0] or None" construct here # because node[0] might evaluate to False 
if it has no child element if len(node): return node[0] else: return None else: # tail return node.getnext() return (node, "tail") if node.tail else node.getnext() def getParentNode(self, node): if isinstance(node, tuple): # Text node node, key = node assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key if key == "text": return node # else: fallback to "normal" processing elif node in self.fragmentChildren: return None return node.getparent() PK.e[~html5lib/treewalkers/etree.pycnu[ abc@`sddlmZmZmZyddlmZWn?ek rqyddlmZWqrek rmeZqrXnXddl Z ddl m Z ddl m Z ddlmZe jd Zd ZeeZdS( i(tabsolute_importtdivisiontunicode_literals(t OrderedDictN(t string_typesi(tbasei(tmoduleFactoryFactoryu {([^}]*)}(.*)c`s>|}|jdjdtjffdY}tS(Nuasdt TreeWalkerc`s8eZdZfdZdZdZdZRS(uGiven the particular ElementTree representation, this implementation, to avoid using recursion, returns "nodes" as tuples with the following content: 1. The current element 2. The index of the element relative to its parent 3. A stack of ancestor elements 4. 
A flag "text", "tail" or None to indicate if the current node is a text node; either the text or tail of the current element (1) c `st|trL|\}}}}|d krCtjt||fS|}nt|dsj|j}n|jd krtjfS|jdkrtj |j |j d|j dfS|jkrtj |j fSt|jt stt|jtj|j}|r-|j\}}nd}|j}t}xmt|jjD]V\} } tj| }|r| ||jd |jd fupublicIdusystemIdii(utextutail(u DOCUMENT_ROOTuDOCUMENT_FRAGMENT(t isinstancettupleRtTEXTtgetattrthasattrtgetrootttagtDOCUMENTtDOCTYPEttexttgettCOMMENTRtAssertionErrorttypet tag_regexptmatchtgroupstNoneRtlisttattribtitemstgrouptELEMENTtlen( tselftnodeteltt_tflagRt namespaceRtattrstnametvalue(tElementTreeCommentType(sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pytgetNodeDetails's8    '  "% cS`st|tr$|\}}}}n|dgdf\}}}}|dkrRdS|jrk|||dfSt|r|j||dd|dfSdSdS(Nutextutaili(utextutail(RR RRRtappend(R R!telementtkeytparentsR$((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pyt getFirstChildOs    cS`st|tr$|\}}}}ndS|dkrht|ra|j||dd|dfSdSnc|jr|dkr|||dfS|t|ddkr|d|d|d|dfSdSdS(Nutextiutailii(RR RRR+ttail(R R!R,R-R.R$((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pytgetNextSibling`s    cS`st|tr$|\}}}}ndS|dkrQ|s>|S|||dfSn^|j}|sg|St|dj|dkst|t|dj||dfSdS(Nutextii(RR RtpopRtcountRtindex(R R!R,R-R.R$tparent((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pyt getParentNodets  %(t__name__t __module__t__doc__R*R/R1R6((R)(sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pyRs  (  (tCommentRRtNonRecursiveTreeWalkertlocals(tElementTreeImplementationt ElementTreeR((R)sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pytgetETreeBuildersn(t __future__RRRt collectionsRt ImportErrort ordereddicttdicttretpip._vendor.sixRtRt_utilsRtcompileRR?tgetETreeModule(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pyts    tPK.e[_m  html5lib/treewalkers/genshi.pynu[from __future__ import absolute_import, division, unicode_literals from genshi.core import QName 
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT from . import base from ..constants import voidElements, namespaces class TreeWalker(base.TreeWalker): def __iter__(self): # Buffer the events so we can pass in the following one previous = None for event in self.tree: if previous is not None: for token in self.tokens(previous, event): yield token previous = event # Don't forget the final event! if previous is not None: for token in self.tokens(previous, None): yield token def tokens(self, event, next): kind, data, _ = event if kind == START: tag, attribs = data name = tag.localname namespace = tag.namespace converted_attribs = {} for k, v in attribs: if isinstance(k, QName): converted_attribs[(k.namespace, k.localname)] = v else: converted_attribs[(None, k)] = v if namespace == namespaces["html"] and name in voidElements: for token in self.emptyTag(namespace, name, converted_attribs, not next or next[0] != END or next[1] != tag): yield token else: yield self.startTag(namespace, name, converted_attribs) elif kind == END: name = data.localname namespace = data.namespace if namespace != namespaces["html"] or name not in voidElements: yield self.endTag(namespace, name) elif kind == COMMENT: yield self.comment(data) elif kind == TEXT: for token in self.text(data): yield token elif kind == DOCTYPE: yield self.doctype(*data) elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS, START_CDATA, END_CDATA, PI): pass else: yield self.unknown(kind) PK.e[w html5lib/treewalkers/genshi.pyonu[ abc@`sddlmZmZmZddlmZddlmZmZmZm Z m Z ddlm Z m Z m Z mZmZmZddlmZddlmZmZd ejfd YZd S( i(tabsolute_importtdivisiontunicode_literals(tQName(tSTARTtENDt XML_NAMESPACEtDOCTYPEtTEXT(tSTART_NStEND_NSt START_CDATAt END_CDATAtPItCOMMENTi(tbasei(t voidElementst namespacest TreeWalkercB`seZdZdZRS(cc`sd}xH|jD]=}|dk rGx"|j||D] }|Vq5Wn|}qW|dk rx"|j|dD] 
}|VqpWndS(N(tNonettreettokens(tselftpreviousteventttoken((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/genshi.pyt__iter__ s    cc`s|\}}}|tkr |\}}|j}|j} i} xL|D]D\} } t| trz| | | j| jfs (.PK.e["html5lib/treewalkers/base.pycnu[ abc @`sddlmZmZmZddlmZddlmZmZm Z ddddd d d d d g Z ej Z ej ZejZejZejZejZdZdje Z defdYZdefdYZdS(i(tabsolute_importtdivisiontunicode_literals(tNodei(t namespacest voidElementstspaceCharactersuDOCUMENTuDOCTYPEuTEXTuELEMENTuCOMMENTuENTITYuUNKNOWNu TreeWalkeruNonRecursiveTreeWalkeru <#UNKNOWN#>ut TreeWalkercB`steZdZdZdZedZdZdZdZ dZ d d dZ d Z d ZRS( cC`s ||_dS(N(ttree(tselfR((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyt__init__scC`s tdS(N(tNotImplementedError(R ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyt__iter__scC`sidd6|d6S(NuSerializeErrorutypeudata((R tmsg((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyterrorscc`s<idd6|d6|d6|d6V|r8|jdVndS(NuEmptyTagutypeunameu namespaceudatauVoid element has children(R(R t namespacetnametattrst hasChildren((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytemptyTags  cC`s idd6|d6|d6|d6S(NuStartTagutypeunameu namespaceudata((R RRR((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytstartTag%s cC`sidd6|d6|d6S(NuEndTagutypeunameu namespace((R RR((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytendTag+s cc`s|}|jt}|t|t| }|rKidd6|d6Vn|}|jt}|t|}|ridd6|d6Vn|ridd6|d6VndS(NuSpaceCharactersutypeudatau Characters(tlstripRtlentrstrip(R tdatatmiddletlefttright((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyttext0scC`sidd6|d6S(NuCommentutypeudata((R R((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytcomment>scC`s idd6|d6|d6|d6S(NuDoctypeutypeunameupublicIdusystemId((R RtpublicIdtsystemId((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytdoctypeAs 
cC`sidd6|d6S(NuEntityutypeuname((R R((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytentityGscC`s|jd|S(NuUnknown node type: (R(R tnodeType((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytunknownJsN(t__name__t __module__R R RtFalseRRRRRtNoneR!R"R$(((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyRs         tNonRecursiveTreeWalkercB`s5eZdZdZdZdZdZRS(cC`s tdS(N(R (R tnode((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytgetNodeDetailsOscC`s tdS(N(R (R R*((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyt getFirstChildRscC`s tdS(N(R (R R*((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytgetNextSiblingUscC`s tdS(N(R (R R*((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyt getParentNodeXsc c`s|j}x|dk r|j|}|d|d}}t}|tkr_|j|Vn |tkrx|j|D] }|Vq{Wn|tkr|\}}}}| s|t dkr|t krx%|j ||||D] }|VqWt}q|j |||Vni|t kr7|j|dVnH|tkrX|j|dVn'|tkrmt}n|j|dV|r|j|} nd} | dk r| }q x|dk r|j|}|d|d}}|tkr<|\}}}}|r|t dks%|t kr<|j||Vq<n|j|krUd}Pn|j|} | dk rz| }Pq|j|}qWq WdS(Niiuhtml(RR(R+R'tDOCTYPER!tTEXTRtELEMENTRRRRtCOMMENTRtENTITYR"tDOCUMENTtTrueR$R,RR-R.( R t currentNodetdetailsttypeRttokenRRt attributest firstChildt nextSibling((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyR [sZ     #          " (R%R&R+R,R-R.R (((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyR)Ns     N(t __future__RRRtxml.domRt constantsRRRt__all__t DOCUMENT_NODER4tDOCUMENT_TYPE_NODER/t TEXT_NODER0t ELEMENT_NODER1t COMMENT_NODER2t ENTITY_NODER3tUNKNOWNtjointobjectRR)(((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyts       :PK.e[&>OOhtml5lib/treewalkers/etree.pyonu[ abc@`sddlmZmZmZyddlmZWn?ek rqyddlmZWqrek rmeZqrXnXddl Z ddl m Z ddl m Z ddlmZe jd Zd ZeeZdS( i(tabsolute_importtdivisiontunicode_literals(t OrderedDictN(t 
string_typesi(tbasei(tmoduleFactoryFactoryu {([^}]*)}(.*)c`s>|}|jdjdtjffdY}tS(Nuasdt TreeWalkerc`s8eZdZfdZdZdZdZRS(uGiven the particular ElementTree representation, this implementation, to avoid using recursion, returns "nodes" as tuples with the following content: 1. The current element 2. The index of the element relative to its parent 3. A stack of ancestor elements 4. A flag "text", "tail" or None to indicate if the current node is a text node; either the text or tail of the current element (1) c `st|trL|\}}}}|d krCtjt||fS|}nt|dsj|j}n|jd krtjfS|jdkrtj |j |j d|j dfS|jkrtj |j fSt j|j}|r|j\}}nd}|j}t}xmt|jjD]V\} } t j| }|rz| ||jd |jd fupublicIdusystemIdii(utextutail(u DOCUMENT_ROOTuDOCUMENT_FRAGMENT(t isinstancettupleRtTEXTtgetattrthasattrtgetrootttagtDOCUMENTtDOCTYPEttexttgettCOMMENTt tag_regexptmatchtgroupstNoneRtlisttattribtitemstgrouptELEMENTtlen( tselftnodeteltt_tflagRt namespaceRtattrstnametvalue(tElementTreeCommentType(sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pytgetNodeDetails's6      "% cS`st|tr$|\}}}}n|dgdf\}}}}|dkrRdS|jrk|||dfSt|r|j||dd|dfSdSdS(Nutextutaili(utextutail(RR RRRtappend(RRtelementtkeytparentsR"((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pyt getFirstChildOs    cS`st|tr$|\}}}}ndS|dkrht|ra|j||dd|dfSdSnc|jr|dkr|||dfS|t|ddkr|d|d|d|dfSdSdS(Nutextiutailii(RR RRR)ttail(RRR*R+R,R"((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pytgetNextSibling`s    cS`st|tr$|\}}}}ndS|dkrQ|s>|S|||dfSn9|j}|sg|S|t|dj||dfSdS(Nutexti(RR RtpopRtindex(RRR*R+R,R"tparent((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pyt getParentNodets  (t__name__t __module__t__doc__R(R-R/R3((R'(sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pyRs  (  (tCommentRRtNonRecursiveTreeWalkertlocals(tElementTreeImplementationt ElementTreeR((R'sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pytgetETreeBuildersn(t __future__RRRt 
collectionsRt ImportErrort ordereddicttdicttretpip._vendor.sixRtRt_utilsRtcompileRR<tgetETreeModule(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pyts    tPK.e[Jn$$#html5lib/treewalkers/etree_lxml.pycnu[ abc@`sddlmZmZmZddlmZddlmZddlm Z ddl m Z ddl m Z d Z d efd YZd efd YZdefdYZdefdYZde jfdYZdS(i(tabsolute_importtdivisiontunicode_literals(t text_type(tetreei(t tag_regexpi(tbase(t _ihatexmlcC`s7|dkrdSt|tr#|S|jddSdS(Nuasciiustrict(tNonet isinstanceRtdecode(ts((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt ensure_str s  tRootcB`s,eZdZdZdZdZRS(cC`s||_g|_yV|jjrg|jjt|t|jjt|jjt|jj nWnt k r{nXy|j }Wnt k r|}nXx"|j dk r|j }qWx,|dk r|jj||j}qWd|_d|_dS(N(t elementtreetchildrentdocinfot internalDTDtappendtDoctypeR t root_namet public_idt system_urltAttributeErrortgetroott getpreviousRtgetnextttextttail(tselftettnode((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt__init__s*       cC`s |j|S(N(R(Rtkey((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt __getitem__1scC`sdS(N(R(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR4scC`sdS(Ni((R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt__len__7s(t__name__t __module__R R"RR#(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR s   RcB`seZdZdZRS(cC`s:||_||_||_||_d|_d|_dS(N(t root_nodetnameRt system_idRRR(RR&R'RR(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR <s      cC`s|jjdS(Ni(R&R(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyREs(R$R%R R(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR;s t FragmentRootcB`seZdZdZRS(cC`s9g|D]}t||^q|_d|_|_dS(N(tFragmentWrapperRRRR(RRtchild((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR 
Js%cC`sdS(N(R(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyRNs(R$R%R R(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR)Is R*cB`sYeZdZdZdZdZdZdZdZdZ dZ RS( cC`s|||_||_t|jdr<t|jj|_n d|_t|jdrot|jj|_n d|_dS(Nutextutail(R&tobjthasattrR RRR(Rt fragment_rootR,((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR Ss   cC`st|j|S(N(tgetattrR,(RR'((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt __getattr___scC`sE|jj}|j|}|t|dkr=||dSdSdS(Ni(R&RtindextlenR(Rtsiblingstidx((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyRbs   cC`s |j|S(N(R,(RR!((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR"jscC`s t|jS(N(tboolR,(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt__bool__mscC`sdS(N(R(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt getparentpscC`s t|jS(N(tstrR,(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt__str__sscC`s t|jS(N(R8R,(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt __unicode__vscC`s t|jS(N(R2R,(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR#ys( R$R%R R0RR"R6R7R9R:R#(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR*Rs       t TreeWalkercB`s5eZdZdZdZdZdZRS(cC`skt|tr-t||_t|}nt|_t|}tjj||t j |_ dS(N( R tlisttsettfragmentChildrenR)R RtNonRecursiveTreeWalkerR Rt InfosetFiltertfilter(Rttree((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR ~s  c C`s4t|trS|\}}|d ks7td|tjtt||fSt|trltjfSt|t rtj |j |j |j fSt|trt|d rtjt|jfS|jtjkrtjt|jfS|jtjkr#tjt|jdd!fStjt|j}|rV|j\}}nd}t|j}i}xt|jjD]n\}}t|}t|}tj|}|r|||jd|jdfRR7(RRR!((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt getParentNodes  
(R$R%R RWRXRYRZ(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR;}s  ) N(t __future__RRRtpip._vendor.sixRtlxmlRttreebuilders.etreeRtRRR tobjectR RR)R*R?R;(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyts & +PK.e[@#yy!html5lib/treewalkers/__init__.pycnu[ abc@`sdZddlmZmZmZddlmZddlmZdddd d d gZ iZ dd Z d Z dZdS(uA collection of modules for iterating through different kinds of tree, generating tokens identical to those produced by the tokenizer module. To create a tree walker for a new type of tree, you need to do implement a tree walker object (called TreeWalker by convention) that implements a 'serialize' method taking a tree as sole argument and returning an iterator generating tokens. i(tabsolute_importtdivisiontunicode_literalsi(t constants(t default_etreeu getTreeWalkerupprintudomuetreeugenshiu etree_lxmlcK`s|j}|tkr|dkrDddlm}|jt|u iudatau %s%s="%s"uEndTaguCommentu %suDoctypeupublicIdu%susystemIduu%su%su %su Charactersu%s"%s"uSpaceCharactersuBconcatenateCharacterTokens should have got rid of all Space tokensuUnknown token type, %su (uStartTaguEmptyTag( RRt namespacestprefixesRtsortedtitemstFalsetAssertionErrort ValueErrorR( twalkertoutputtindentRRtnstnametattrst namespacet localnametvalue((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.pytpprintKsd  !    
%"    "    "   " N(t__doc__t __future__RRRR Rt_utilsRt__all__R R RRR+(((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.pyt s ' PK.e[J=""""#html5lib/treewalkers/etree_lxml.pyonu[ abc@`sddlmZmZmZddlmZddlmZddlm Z ddl m Z ddl m Z d Z d efd YZd efd YZdefdYZdefdYZde jfdYZdS(i(tabsolute_importtdivisiontunicode_literals(t text_type(tetreei(t tag_regexpi(tbase(t _ihatexmlcC`s7|dkrdSt|tr#|S|jddSdS(Nuasciiustrict(tNonet isinstanceRtdecode(ts((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt ensure_str s  tRootcB`s,eZdZdZdZdZRS(cC`s||_g|_yV|jjrg|jjt|t|jjt|jjt|jj nWnt k r{nXy|j }Wnt k r|}nXx"|j dk r|j }qWx,|dk r|jj||j}qWd|_d|_dS(N(t elementtreetchildrentdocinfot internalDTDtappendtDoctypeR t root_namet public_idt system_urltAttributeErrortgetroott getpreviousRtgetnextttextttail(tselftettnode((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt__init__s*       cC`s |j|S(N(R(Rtkey((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt __getitem__1scC`sdS(N(R(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR4scC`sdS(Ni((R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt__len__7s(t__name__t __module__R R"RR#(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR s   RcB`seZdZdZRS(cC`s:||_||_||_||_d|_d|_dS(N(t root_nodetnameRt system_idRRR(RR&R'RR(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR <s      cC`s|jjdS(Ni(R&R(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyREs(R$R%R R(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR;s t FragmentRootcB`seZdZdZRS(cC`s9g|D]}t||^q|_d|_|_dS(N(tFragmentWrapperRRRR(RRtchild((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR 
Js%cC`sdS(N(R(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyRNs(R$R%R R(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR)Is R*cB`sYeZdZdZdZdZdZdZdZdZ dZ RS( cC`s|||_||_t|jdr<t|jj|_n d|_t|jdrot|jj|_n d|_dS(Nutextutail(R&tobjthasattrR RRR(Rt fragment_rootR,((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR Ss   cC`st|j|S(N(tgetattrR,(RR'((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt __getattr___scC`sE|jj}|j|}|t|dkr=||dSdSdS(Ni(R&RtindextlenR(Rtsiblingstidx((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyRbs   cC`s |j|S(N(R,(RR!((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR"jscC`s t|jS(N(tboolR,(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt__bool__mscC`sdS(N(R(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt getparentpscC`s t|jS(N(tstrR,(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt__str__sscC`s t|jS(N(R8R,(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt __unicode__vscC`s t|jS(N(R2R,(R((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR#ys( R$R%R R0RR"R6R7R9R:R#(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR*Rs       t TreeWalkercB`s5eZdZdZdZdZdZRS(cC`skt|tr-t||_t|}nt|_t|}tjj||t j |_ dS(N( R tlisttsettfragmentChildrenR)R RtNonRecursiveTreeWalkerR Rt InfosetFiltertfilter(Rttree((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR ~s  c C`st|tr7|\}}tjtt||fSt|trPtjfSt|tr{tj |j |j |j fSt|t rt|d rtjt|jfS|jtjkrtjt|jfS|jtjkrtjt|jdd!fStjt|j}|r:|j\}}nd}t|j}i}xt|jjD]n\}}t|}t|}tj|}|r|||jd|jdfRR7(RRR!((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyt getParentNodes  (R$R%R 
RVRWRXRY(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyR;}s  ) N(t __future__RRRtpip._vendor.sixRtlxmlRttreebuilders.etreeRtRRR tobjectR RR)R*R?R;(((sO/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyts & +PK.e["html5lib/treewalkers/base.pyonu[ abc @`sddlmZmZmZddlmZddlmZmZm Z ddddd d d d d g Z ej Z ej ZejZejZejZejZdZdje Z defdYZdefdYZdS(i(tabsolute_importtdivisiontunicode_literals(tNodei(t namespacest voidElementstspaceCharactersuDOCUMENTuDOCTYPEuTEXTuELEMENTuCOMMENTuENTITYuUNKNOWNu TreeWalkeruNonRecursiveTreeWalkeru <#UNKNOWN#>ut TreeWalkercB`steZdZdZdZedZdZdZdZ dZ d d dZ d Z d ZRS( cC`s ||_dS(N(ttree(tselfR((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyt__init__scC`s tdS(N(tNotImplementedError(R ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyt__iter__scC`sidd6|d6S(NuSerializeErrorutypeudata((R tmsg((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyterrorscc`s<idd6|d6|d6|d6V|r8|jdVndS(NuEmptyTagutypeunameu namespaceudatauVoid element has children(R(R t namespacetnametattrst hasChildren((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytemptyTags  cC`s idd6|d6|d6|d6S(NuStartTagutypeunameu namespaceudata((R RRR((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytstartTag%s cC`sidd6|d6|d6S(NuEndTagutypeunameu namespace((R RR((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytendTag+s cc`s|}|jt}|t|t| }|rKidd6|d6Vn|}|jt}|t|}|ridd6|d6Vn|ridd6|d6VndS(NuSpaceCharactersutypeudatau Characters(tlstripRtlentrstrip(R tdatatmiddletlefttright((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyttext0scC`sidd6|d6S(NuCommentutypeudata((R R((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytcomment>scC`s idd6|d6|d6|d6S(NuDoctypeutypeunameupublicIdusystemId((R 
RtpublicIdtsystemId((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytdoctypeAs cC`sidd6|d6S(NuEntityutypeuname((R R((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytentityGscC`s|jd|S(NuUnknown node type: (R(R tnodeType((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytunknownJsN(t__name__t __module__R R RtFalseRRRRRtNoneR!R"R$(((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyRs         tNonRecursiveTreeWalkercB`s5eZdZdZdZdZdZRS(cC`s tdS(N(R (R tnode((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytgetNodeDetailsOscC`s tdS(N(R (R R*((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyt getFirstChildRscC`s tdS(N(R (R R*((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pytgetNextSiblingUscC`s tdS(N(R (R R*((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyt getParentNodeXsc c`s|j}x|dk r|j|}|d|d}}t}|tkr_|j|Vn |tkrx|j|D] }|Vq{Wn|tkr|\}}}}| s|t dkr|t krx%|j ||||D] }|VqWt}q|j |||Vni|t kr7|j|dVnH|tkrX|j|dVn'|tkrmt}n|j|dV|r|j|} nd} | dk r| }q x|dk r|j|}|d|d}}|tkr<|\}}}}|r|t dks%|t kr<|j||Vq<n|j|krUd}Pn|j|} | dk rz| }Pq|j|}qWq WdS(Niiuhtml(RR(R+R'tDOCTYPER!tTEXTRtELEMENTRRRRtCOMMENTRtENTITYR"tDOCUMENTtTrueR$R,RR-R.( R t currentNodetdetailsttypeRttokenRRt attributest firstChildt nextSibling((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyR [sZ     #          " (R%R&R+R,R-R.R (((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyR)Ns     N(t __future__RRRtxml.domRt constantsRRRt__all__t DOCUMENT_NODER4tDOCUMENT_TYPE_NODER/t TEXT_NODER0t ELEMENT_NODER1t COMMENT_NODER2t ENTITY_NODER3tUNKNOWNtjointobjectRR)(((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyts       :PK.e[w html5lib/treewalkers/genshi.pycnu[ abc@`sddlmZmZmZddlmZddlmZmZmZm Z m Z ddlm Z m Z m Z mZmZmZddlmZddlmZmZd ejfd YZd S( 
i(tabsolute_importtdivisiontunicode_literals(tQName(tSTARTtENDt XML_NAMESPACEtDOCTYPEtTEXT(tSTART_NStEND_NSt START_CDATAt END_CDATAtPItCOMMENTi(tbasei(t voidElementst namespacest TreeWalkercB`seZdZdZRS(cc`sd}xH|jD]=}|dk rGx"|j||D] }|Vq5Wn|}qW|dk rx"|j|dD] }|VqpWndS(N(tNonettreettokens(tselftpreviousteventttoken((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/genshi.pyt__iter__ s    cc`s|\}}}|tkr |\}}|j}|j} i} xL|D]D\} } t| trz| | | j| jfs (.PK.e[!!html5lib/treewalkers/__init__.pyonu[ abc@`sdZddlmZmZmZddlmZddlmZdddd d d gZ iZ dd Z d Z dZdS(uA collection of modules for iterating through different kinds of tree, generating tokens identical to those produced by the tokenizer module. To create a tree walker for a new type of tree, you need to do implement a tree walker object (called TreeWalker by convention) that implements a 'serialize' method taking a tree as sole argument and returning an iterator generating tokens. i(tabsolute_importtdivisiontunicode_literalsi(t constants(t default_etreeu getTreeWalkerupprintudomuetreeugenshiu etree_lxmlcK`s|j}|tkr|dkrDddlm}|jt|u iudatau %s%s="%s"uEndTaguCommentu %suDoctypeupublicIdu%susystemIduu%su%su %su Charactersu%s"%s"uSpaceCharactersuUnknown token type, %su (uStartTaguEmptyTag( RRt namespacestprefixesRtsortedtitemst ValueErrorR( twalkertoutputtindentRRtnstnametattrst namespacet localnametvalue((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.pytpprintKsd  !    
%"    "    "   " N(t__doc__t __future__RRRR Rt_utilsRt__all__R R RRR)(((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.pyt s ' PK.e[q[html5lib/treewalkers/dom.pycnu[ abc@`sYddlmZmZmZddlmZddlmZdejfdYZ dS(i(tabsolute_importtdivisiontunicode_literals(tNodei(tbaset TreeWalkercB`s,eZdZdZdZdZRS(cC`sX|jtjkr.tj|j|j|jfS|jtjtj fkrYtj |j fS|jtj kri}xgt |jjD]P}|j|}|jr|j||j|jfsPK.e[eohtml5lib/treewalkers/dom.pynu[from __future__ import absolute_import, division, unicode_literals from xml.dom import Node from . import base class TreeWalker(base.NonRecursiveTreeWalker): def getNodeDetails(self, node): if node.nodeType == Node.DOCUMENT_TYPE_NODE: return base.DOCTYPE, node.name, node.publicId, node.systemId elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): return base.TEXT, node.nodeValue elif node.nodeType == Node.ELEMENT_NODE: attrs = {} for attr in list(node.attributes.keys()): attr = node.getAttributeNode(attr) if attr.namespaceURI: attrs[(attr.namespaceURI, attr.localName)] = attr.value else: attrs[(None, attr.name)] = attr.value return (base.ELEMENT, node.namespaceURI, node.nodeName, attrs, node.hasChildNodes()) elif node.nodeType == Node.COMMENT_NODE: return base.COMMENT, node.nodeValue elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE): return (base.DOCUMENT,) else: return base.UNKNOWN, node.nodeType def getFirstChild(self, node): return node.firstChild def getNextSibling(self, node): return node.nextSibling def getParentNode(self, node): return node.parentNode PK.e[sD! html5lib/treewalkers/__init__.pynu["""A collection of modules for iterating through different kinds of tree, generating tokens identical to those produced by the tokenizer module. To create a tree walker for a new type of tree, you need to do implement a tree walker object (called TreeWalker by convention) that implements a 'serialize' method taking a tree as sole argument and returning an iterator generating tokens. 
""" from __future__ import absolute_import, division, unicode_literals from .. import constants from .._utils import default_etree __all__ = ["getTreeWalker", "pprint", "dom", "etree", "genshi", "etree_lxml"] treeWalkerCache = {} def getTreeWalker(treeType, implementation=None, **kwargs): """Get a TreeWalker class for various types of tree with built-in support Args: treeType (str): the name of the tree type required (case-insensitive). Supported values are: - "dom": The xml.dom.minidom DOM implementation - "etree": A generic walker for tree implementations exposing an elementtree-like interface (known to work with ElementTree, cElementTree and lxml.etree). - "lxml": Optimized walker for lxml.etree - "genshi": a Genshi stream Implementation: A module implementing the tree type e.g. xml.etree.ElementTree or cElementTree (Currently applies to the "etree" tree type only). """ treeType = treeType.lower() if treeType not in treeWalkerCache: if treeType == "dom": from . import dom treeWalkerCache[treeType] = dom.TreeWalker elif treeType == "genshi": from . import genshi treeWalkerCache[treeType] = genshi.TreeWalker elif treeType == "lxml": from . import etree_lxml treeWalkerCache[treeType] = etree_lxml.TreeWalker elif treeType == "etree": from . 
import etree if implementation is None: implementation = default_etree # XXX: NEVER cache here, caching is done in the etree submodule return etree.getETreeModule(implementation, **kwargs).TreeWalker return treeWalkerCache.get(treeType) def concatenateCharacterTokens(tokens): pendingCharacters = [] for token in tokens: type = token["type"] if type in ("Characters", "SpaceCharacters"): pendingCharacters.append(token["data"]) else: if pendingCharacters: yield {"type": "Characters", "data": "".join(pendingCharacters)} pendingCharacters = [] yield token if pendingCharacters: yield {"type": "Characters", "data": "".join(pendingCharacters)} def pprint(walker): """Pretty printer for tree walkers""" output = [] indent = 0 for token in concatenateCharacterTokens(walker): type = token["type"] if type in ("StartTag", "EmptyTag"): # tag name if token["namespace"] and token["namespace"] != constants.namespaces["html"]: if token["namespace"] in constants.prefixes: ns = constants.prefixes[token["namespace"]] else: ns = token["namespace"] name = "%s %s" % (ns, token["name"]) else: name = token["name"] output.append("%s<%s>" % (" " * indent, name)) indent += 2 # attributes (sorted for consistent ordering) attrs = token["data"] for (namespace, localname), value in sorted(attrs.items()): if namespace: if namespace in constants.prefixes: ns = constants.prefixes[namespace] else: ns = namespace name = "%s %s" % (ns, localname) else: name = localname output.append("%s%s=\"%s\"" % (" " * indent, name, value)) # self-closing if type == "EmptyTag": indent -= 2 elif type == "EndTag": indent -= 2 elif type == "Comment": output.append("%s" % (" " * indent, token["data"])) elif type == "Doctype": if token["name"]: if token["publicId"]: output.append("""%s""" % (" " * indent, token["name"], token["publicId"], token["systemId"] if token["systemId"] else "")) elif token["systemId"]: output.append("""%s""" % (" " * indent, token["name"], token["systemId"])) else: output.append("%s" % (" " * indent, 
token["name"])) else: output.append("%s" % (" " * indent,)) elif type == "Characters": output.append("%s\"%s\"" % (" " * indent, token["data"])) elif type == "SpaceCharacters": assert False, "concatenateCharacterTokens should have got rid of all Space tokens" else: raise ValueError("Unknown token type, %s" % type) return "\n".join(output) PK.e[}11html5lib/treebuilders/etree.pynu[from __future__ import absolute_import, division, unicode_literals # pylint:disable=protected-access from pip._vendor.six import text_type import re from . import base from .. import _ihatexml from .. import constants from ..constants import namespaces from .._utils import moduleFactoryFactory tag_regexp = re.compile("{([^}]*)}(.*)") def getETreeBuilder(ElementTreeImplementation, fullTree=False): ElementTree = ElementTreeImplementation ElementTreeCommentType = ElementTree.Comment("asd").tag class Element(base.Node): def __init__(self, name, namespace=None): self._name = name self._namespace = namespace self._element = ElementTree.Element(self._getETreeTag(name, namespace)) if namespace is None: self.nameTuple = namespaces["html"], self._name else: self.nameTuple = self._namespace, self._name self.parent = None self._childNodes = [] self._flags = [] def _getETreeTag(self, name, namespace): if namespace is None: etree_tag = name else: etree_tag = "{%s}%s" % (namespace, name) return etree_tag def _setName(self, name): self._name = name self._element.tag = self._getETreeTag(self._name, self._namespace) def _getName(self): return self._name name = property(_getName, _setName) def _setNamespace(self, namespace): self._namespace = namespace self._element.tag = self._getETreeTag(self._name, self._namespace) def _getNamespace(self): return self._namespace namespace = property(_getNamespace, _setNamespace) def _getAttributes(self): return self._element.attrib def _setAttributes(self, attributes): # Delete existing attributes first # XXX - there may be a better way to do this... 
for key in list(self._element.attrib.keys()): del self._element.attrib[key] for key, value in attributes.items(): if isinstance(key, tuple): name = "{%s}%s" % (key[2], key[1]) else: name = key self._element.set(name, value) attributes = property(_getAttributes, _setAttributes) def _getChildNodes(self): return self._childNodes def _setChildNodes(self, value): del self._element[:] self._childNodes = [] for element in value: self.insertChild(element) childNodes = property(_getChildNodes, _setChildNodes) def hasContent(self): """Return true if the node has children or text""" return bool(self._element.text or len(self._element)) def appendChild(self, node): self._childNodes.append(node) self._element.append(node._element) node.parent = self def insertBefore(self, node, refNode): index = list(self._element).index(refNode._element) self._element.insert(index, node._element) node.parent = self def removeChild(self, node): self._childNodes.remove(node) self._element.remove(node._element) node.parent = None def insertText(self, data, insertBefore=None): if not(len(self._element)): if not self._element.text: self._element.text = "" self._element.text += data elif insertBefore is None: # Insert the text as the tail of the last child element if not self._element[-1].tail: self._element[-1].tail = "" self._element[-1].tail += data else: # Insert the text before the specified node children = list(self._element) index = children.index(insertBefore._element) if index > 0: if not self._element[index - 1].tail: self._element[index - 1].tail = "" self._element[index - 1].tail += data else: if not self._element.text: self._element.text = "" self._element.text += data def cloneNode(self): element = type(self)(self.name, self.namespace) for name, value in self.attributes.items(): element.attributes[name] = value return element def reparentChildren(self, newParent): if newParent.childNodes: newParent.childNodes[-1]._element.tail += self._element.text else: if not newParent._element.text: 
newParent._element.text = "" if self._element.text is not None: newParent._element.text += self._element.text self._element.text = "" base.Node.reparentChildren(self, newParent) class Comment(Element): def __init__(self, data): # Use the superclass constructor to set all properties on the # wrapper element self._element = ElementTree.Comment(data) self.parent = None self._childNodes = [] self._flags = [] def _getData(self): return self._element.text def _setData(self, value): self._element.text = value data = property(_getData, _setData) class DocumentType(Element): def __init__(self, name, publicId, systemId): Element.__init__(self, "") self._element.text = name self.publicId = publicId self.systemId = systemId def _getPublicId(self): return self._element.get("publicId", "") def _setPublicId(self, value): if value is not None: self._element.set("publicId", value) publicId = property(_getPublicId, _setPublicId) def _getSystemId(self): return self._element.get("systemId", "") def _setSystemId(self, value): if value is not None: self._element.set("systemId", value) systemId = property(_getSystemId, _setSystemId) class Document(Element): def __init__(self): Element.__init__(self, "DOCUMENT_ROOT") class DocumentFragment(Element): def __init__(self): Element.__init__(self, "DOCUMENT_FRAGMENT") def testSerializer(element): rv = [] def serializeElement(element, indent=0): if not(hasattr(element, "tag")): element = element.getroot() if element.tag == "": if element.get("publicId") or element.get("systemId"): publicId = element.get("publicId") or "" systemId = element.get("systemId") or "" rv.append("""""" % (element.text, publicId, systemId)) else: rv.append("" % (element.text,)) elif element.tag == "DOCUMENT_ROOT": rv.append("#document") if element.text is not None: rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) if element.tail is not None: raise TypeError("Document node cannot have tail") if hasattr(element, "attrib") and len(element.attrib): raise 
TypeError("Document node cannot have attributes") elif element.tag == ElementTreeCommentType: rv.append("|%s" % (' ' * indent, element.text)) else: assert isinstance(element.tag, text_type), \ "Expected unicode, got %s, %s" % (type(element.tag), element.tag) nsmatch = tag_regexp.match(element.tag) if nsmatch is None: name = element.tag else: ns, name = nsmatch.groups() prefix = constants.prefixes[ns] name = "%s %s" % (prefix, name) rv.append("|%s<%s>" % (' ' * indent, name)) if hasattr(element, "attrib"): attributes = [] for name, value in element.attrib.items(): nsmatch = tag_regexp.match(name) if nsmatch is not None: ns, name = nsmatch.groups() prefix = constants.prefixes[ns] attr_string = "%s %s" % (prefix, name) else: attr_string = name attributes.append((attr_string, value)) for name, value in sorted(attributes): rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) if element.text: rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) indent += 2 for child in element: serializeElement(child, indent) if element.tail: rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) serializeElement(element, 0) return "\n".join(rv) def tostring(element): # pylint:disable=unused-variable """Serialize an element and its child nodes to a string""" rv = [] filter = _ihatexml.InfosetFilter() def serializeElement(element): if isinstance(element, ElementTree.ElementTree): element = element.getroot() if element.tag == "": if element.get("publicId") or element.get("systemId"): publicId = element.get("publicId") or "" systemId = element.get("systemId") or "" rv.append("""""" % (element.text, publicId, systemId)) else: rv.append("" % (element.text,)) elif element.tag == "DOCUMENT_ROOT": if element.text is not None: rv.append(element.text) if element.tail is not None: raise TypeError("Document node cannot have tail") if hasattr(element, "attrib") and len(element.attrib): raise TypeError("Document node cannot have attributes") for child in element: 
serializeElement(child) elif element.tag == ElementTreeCommentType: rv.append("" % (element.text,)) else: # This is assumed to be an ordinary element if not element.attrib: rv.append("<%s>" % (filter.fromXmlName(element.tag),)) else: attr = " ".join(["%s=\"%s\"" % ( filter.fromXmlName(name), value) for name, value in element.attrib.items()]) rv.append("<%s %s>" % (element.tag, attr)) if element.text: rv.append(element.text) for child in element: serializeElement(child) rv.append("" % (element.tag,)) if element.tail: rv.append(element.tail) serializeElement(element) return "".join(rv) class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable documentClass = Document doctypeClass = DocumentType elementClass = Element commentClass = Comment fragmentClass = DocumentFragment implementation = ElementTreeImplementation def testSerializer(self, element): return testSerializer(element) def getDocument(self): if fullTree: return self.document._element else: if self.defaultNamespace is not None: return self.document._element.find( "{%s}html" % self.defaultNamespace) else: return self.document._element.find("html") def getFragment(self): return base.TreeBuilder.getFragment(self)._element return locals() getETreeModule = moduleFactoryFactory(getETreeBuilder) PK.e[8q--html5lib/treebuilders/dom.pyonu[ abc@`sddlmZmZmZddlmZddlmZmZddl Z ddl m Z ddl m Z dd l m Z dd lmZd ZeeZdS( i(tabsolute_importtdivisiontunicode_literals(tMutableMapping(tminidomtNodeNi(tbasei(t constants(t namespaces(tmoduleFactoryFactoryc`svdtfdYdtjffdYdtjffdY}dtS(NtAttrListcB`sPeZdZdZdZdZdZdZdZdZ RS(cS`s ||_dS(N(telement(tselfR ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt__init__scS`st|jjjS(N(titerR t attributestkeys(R ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt__iter__scS`sJt|trtn.|jjj|}||_||jj|:scS`s ||_|jj|jdS(N(tparentR t appendChild(R tnode((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR+=s 
cS`sH|jjj|}|r4|jj||jn|jj|dS(N(R RtcreateTextNodet insertBeforeR+(R tdataR.ttext((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt insertTextAscS`s&|jj|j|j||_dS(N(R R.R*(R R,trefNode((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR.HscS`s8|jj|jkr+|jj|jnd|_dS(N(R t parentNodet removeChildR(R*(R R,((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR4LscS`sOx?|jjrA|jj}|jj||jj|qWg|_dS(N(R t hasChildNodest firstChildR4R+t childNodes(R t newParenttchild((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pytreparentChildrenQs  c`s |jS(N(R (R (R (sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt getAttributesXscS`s|rxt|jD]~\}}t|tr|ddk r]|dd|d}n |d}|jj|d||q|jj||qWndS(Niu:ii(RRRRR(R tsetAttributeNSt setAttribute(R RRRt qualifiedName((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt setAttributes[s   c`s|jjtS(N(R t cloneNodetFalse(R (R$(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR@jscS`s |jjS(N(R R5(R ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt hasContentmscS`s4|jdkr td|jfS|j|jfSdS(Nuhtml(t namespaceR(RR(R ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt getNameTuplepsN(R"R#R tpropertyRCR+R(R1R.R4R:R;R?RR@RBRDt nameTuple((R R$(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR$5s         t TreeBuilderc`seZfdZfdZd fdZfdZfdZdZfdZ dZ dZ d d Z Z d ZRS( c`s+jjddd|_tj|S(N(tgetDOMImplementationtcreateDocumentR(tdomtweakreftproxy(R (tDom(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt documentClassysc`st|d}|d}|d}j}|j|||}|jj|tkrp|j|_ndS(NunameupublicIdusystemId(RHtcreateDocumentTypetdocumentR+RRJR(R ttokenRtpublicIdtsystemIdtdomimpltdoctype(RMR$(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt insertDoctype}s     
c`sO|dkr0|jdkr0|jj|}n|jj||}|S(N(R(tdefaultNamespaceRJt createElementtcreateElementNS(R RRCR,(R$(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt elementClasssc`s|jj|S(N(RJt createComment(R R/(R$(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt commentClasssc`s|jjS(N(RJtcreateDocumentFragment(R (R$(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt fragmentClassscS`s|jj|jdS(N(RJR+R (R R,((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR+sc`s |S(N((R R (ttestSerializer(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR_scS`s|jS(N(RJ(R ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt getDocumentscS`stjj|jS(N(RRGt getFragmentR (R ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyRascS`s|}||kr+tjj|||nwt|jdrtj|jjkrt|jj|j_|jjj tjqn|jj |jj |dS(Nu_child_node_types( RRGR1R&RJRt TEXT_NODEt_child_node_typesRtappendR+R-(R R/R*((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR1s N(R"R#RNRVR(RZR\R^R+R_R`RaR1timplementationR((RMtDomImplementationR$R_(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyRGxs     c`s?|jgdfd|ddjS(Nic `s|jtjkr|jr|js-|jrr|jp9d}|jpHd}jdd||j||fqjdd||jfqjdd|fn|jtjkrjdn|jtjkrjdn|jtj kr%jdd||j fnu|jtj krXjd d||j fnBt |d r|j dk rd tj|j |jf}n |j}jd d||f|jrg}xtt|jD]r}|jj|}|j}|j}|j } | r8d tj| |jf}n |j}|j||fqWx?t|D].\}}jd d|d||fqeWn|d7}x|jD]} | |qWdS(Nuu|%su u|%su|%su #documentu#document-fragmentu|%su|%s"%s"u namespaceURIu%s %su|%s<%s>u |%s%s="%s"i(tnodeTypeRtDOCUMENT_TYPE_NODERRRRSRdt DOCUMENT_NODEtDOCUMENT_FRAGMENT_NODEt COMMENT_NODEt nodeValueRbR&R'R(RtprefixesR%t hasAttributestrangeRRtitemRt localNametsortedR7( R tindentRRRSRRtiRRtnsR9(trvtserializeElement(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyRwsN  !!!      
) u (t normalizetjoin(R ((RvRwsI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR_s  . (RRRRGtlocals(RfRG((R RMRfR$R_sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt getDomBuilders $"C(: 6(t __future__RRRt collectionsRtxml.domRRRKtRRRt_utilsR R{t getDomModule(((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyts  PK.e[DVv6v6html5lib/treebuilders/base.pynu[from __future__ import absolute_import, division, unicode_literals from pip._vendor.six import text_type from ..constants import scopingElements, tableInsertModeElements, namespaces # The scope markers are inserted when entering object elements, # marquees, table cells, and table captions, and are used to prevent formatting # from "leaking" into tables, object elements, and marquees. Marker = None listElementsMap = { None: (frozenset(scopingElements), False), "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False), "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"), (namespaces["html"], "ul")])), False), "table": (frozenset([(namespaces["html"], "html"), (namespaces["html"], "table")]), False), "select": (frozenset([(namespaces["html"], "optgroup"), (namespaces["html"], "option")]), True) } class Node(object): def __init__(self, name): """Node representing an item in the tree. name - The tag name associated with the node parent - The parent of the current node (or None for the document node) value - The value of the current node (applies to text nodes and comments attributes - a dict holding name, value pairs for attributes of the node childNodes - a list of child nodes of the current node. 
This must include all elements but not necessarily other node types _flags - A list of miscellaneous flags that can be set on the node """ self.name = name self.parent = None self.value = None self.attributes = {} self.childNodes = [] self._flags = [] def __str__(self): attributesStr = " ".join(["%s=\"%s\"" % (name, value) for name, value in self.attributes.items()]) if attributesStr: return "<%s %s>" % (self.name, attributesStr) else: return "<%s>" % (self.name) def __repr__(self): return "<%s>" % (self.name) def appendChild(self, node): """Insert node as a child of the current node """ raise NotImplementedError def insertText(self, data, insertBefore=None): """Insert data as text in the current node, positioned before the start of node insertBefore or to the end of the node's text. """ raise NotImplementedError def insertBefore(self, node, refNode): """Insert node as a child of the current node, before refNode in the list of child nodes. Raises ValueError if refNode is not a child of the current node""" raise NotImplementedError def removeChild(self, node): """Remove node from the children of the current node """ raise NotImplementedError def reparentChildren(self, newParent): """Move all the children of the current node to newParent. This is needed so that trees that don't store text as nodes move the text in the correct way """ # XXX - should this method be made more general? for child in self.childNodes: newParent.appendChild(child) self.childNodes = [] def cloneNode(self): """Return a shallow copy of the current node i.e. 
a node with the same name and attributes but with no parent or child nodes """ raise NotImplementedError def hasContent(self): """Return true if the node has children or text, false otherwise """ raise NotImplementedError class ActiveFormattingElements(list): def append(self, node): equalCount = 0 if node != Marker: for element in self[::-1]: if element == Marker: break if self.nodesEqual(element, node): equalCount += 1 if equalCount == 3: self.remove(element) break list.append(self, node) def nodesEqual(self, node1, node2): if not node1.nameTuple == node2.nameTuple: return False if not node1.attributes == node2.attributes: return False return True class TreeBuilder(object): """Base treebuilder implementation documentClass - the class to use for the bottommost node of a document elementClass - the class to use for HTML Elements commentClass - the class to use for comments doctypeClass - the class to use for doctypes """ # pylint:disable=not-callable # Document class documentClass = None # The class to use for creating a node elementClass = None # The class to use for creating comments commentClass = None # The class to use for creating doctypes doctypeClass = None # Fragment class fragmentClass = None def __init__(self, namespaceHTMLElements): if namespaceHTMLElements: self.defaultNamespace = "http://www.w3.org/1999/xhtml" else: self.defaultNamespace = None self.reset() def reset(self): self.openElements = [] self.activeFormattingElements = ActiveFormattingElements() # XXX - rename these to headElement, formElement self.headPointer = None self.formPointer = None self.insertFromTable = False self.document = self.documentClass() def elementInScope(self, target, variant=None): # If we pass a node in we match that. 
if we pass a string # match any node with that name exactNode = hasattr(target, "nameTuple") if not exactNode: if isinstance(target, text_type): target = (namespaces["html"], target) assert isinstance(target, tuple) listElements, invert = listElementsMap[variant] for node in reversed(self.openElements): if exactNode and node == target: return True elif not exactNode and node.nameTuple == target: return True elif (invert ^ (node.nameTuple in listElements)): return False assert False # We should never reach this point def reconstructActiveFormattingElements(self): # Within this algorithm the order of steps described in the # specification is not quite the same as the order of steps in the # code. It should still do the same though. # Step 1: stop the algorithm when there's nothing to do. if not self.activeFormattingElements: return # Step 2 and step 3: we start with the last element. So i is -1. i = len(self.activeFormattingElements) - 1 entry = self.activeFormattingElements[i] if entry == Marker or entry in self.openElements: return # Step 6 while entry != Marker and entry not in self.openElements: if i == 0: # This will be reset to 0 below i = -1 break i -= 1 # Step 5: let entry be one earlier in the list. 
entry = self.activeFormattingElements[i] while True: # Step 7 i += 1 # Step 8 entry = self.activeFormattingElements[i] clone = entry.cloneNode() # Mainly to get a new copy of the attributes # Step 9 element = self.insertElement({"type": "StartTag", "name": clone.name, "namespace": clone.namespace, "data": clone.attributes}) # Step 10 self.activeFormattingElements[i] = element # Step 11 if element == self.activeFormattingElements[-1]: break def clearActiveFormattingElements(self): entry = self.activeFormattingElements.pop() while self.activeFormattingElements and entry != Marker: entry = self.activeFormattingElements.pop() def elementInActiveFormattingElements(self, name): """Check if an element exists between the end of the active formatting elements and the last marker. If it does, return it, else return false""" for item in self.activeFormattingElements[::-1]: # Check for Marker first because if it's a Marker it doesn't have a # name attribute. if item == Marker: break elif item.name == name: return item return False def insertRoot(self, token): element = self.createElement(token) self.openElements.append(element) self.document.appendChild(element) def insertDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] doctype = self.doctypeClass(name, publicId, systemId) self.document.appendChild(doctype) def insertComment(self, token, parent=None): if parent is None: parent = self.openElements[-1] parent.appendChild(self.commentClass(token["data"])) def createElement(self, token): """Create an element but don't insert it anywhere""" name = token["name"] namespace = token.get("namespace", self.defaultNamespace) element = self.elementClass(name, namespace) element.attributes = token["data"] return element def _getInsertFromTable(self): return self._insertFromTable def _setInsertFromTable(self, value): """Switch the function used to insert an element from the normal one to the misnested table one and back again""" 
self._insertFromTable = value if value: self.insertElement = self.insertElementTable else: self.insertElement = self.insertElementNormal insertFromTable = property(_getInsertFromTable, _setInsertFromTable) def insertElementNormal(self, token): name = token["name"] assert isinstance(name, text_type), "Element %s not unicode" % name namespace = token.get("namespace", self.defaultNamespace) element = self.elementClass(name, namespace) element.attributes = token["data"] self.openElements[-1].appendChild(element) self.openElements.append(element) return element def insertElementTable(self, token): """Create an element and insert it into the tree""" element = self.createElement(token) if self.openElements[-1].name not in tableInsertModeElements: return self.insertElementNormal(token) else: # We should be in the InTable mode. This means we want to do # special magic element rearranging parent, insertBefore = self.getTableMisnestedNodePosition() if insertBefore is None: parent.appendChild(element) else: parent.insertBefore(element, insertBefore) self.openElements.append(element) return element def insertText(self, data, parent=None): """Insert text data.""" if parent is None: parent = self.openElements[-1] if (not self.insertFromTable or (self.insertFromTable and self.openElements[-1].name not in tableInsertModeElements)): parent.insertText(data) else: # We should be in the InTable mode. 
This means we want to do # special magic element rearranging parent, insertBefore = self.getTableMisnestedNodePosition() parent.insertText(data, insertBefore) def getTableMisnestedNodePosition(self): """Get the foster parent element, and sibling to insert before (or None) when inserting a misnested table node""" # The foster parent element is the one which comes before the most # recently opened table element # XXX - this is really inelegant lastTable = None fosterParent = None insertBefore = None for elm in self.openElements[::-1]: if elm.name == "table": lastTable = elm break if lastTable: # XXX - we should really check that this parent is actually a # node here if lastTable.parent: fosterParent = lastTable.parent insertBefore = lastTable else: fosterParent = self.openElements[ self.openElements.index(lastTable) - 1] else: fosterParent = self.openElements[0] return fosterParent, insertBefore def generateImpliedEndTags(self, exclude=None): name = self.openElements[-1].name # XXX td, th and tr are not actually needed if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) and name != exclude): self.openElements.pop() # XXX This is not entirely what the specification says. We should # investigate it more closely. self.generateImpliedEndTags(exclude) def getDocument(self): "Return the final tree" return self.document def getFragment(self): "Return the final fragment" # assert self.innerHTML fragment = self.fragmentClass() self.openElements[0].reparentChildren(fragment) return fragment def testSerializer(self, node): """Serialize the subtree of node in the format required by unit tests node - the node from which to start serializing""" raise NotImplementedError PK.e[JQ7Q7#html5lib/treebuilders/etree_lxml.pynu["""Module for supporting the lxml.etree library. The idea here is to use as much of the native library as possible, without using fragile hacks like custom element names that break between releases. 
The downside of this is that we cannot represent all possible trees; specifically the following are known to cause problems: Text or comments as siblings of the root element Docypes with no name When any of these things occur, we emit a DataLossWarning """ from __future__ import absolute_import, division, unicode_literals # pylint:disable=protected-access import warnings import re import sys from . import base from ..constants import DataLossWarning from .. import constants from . import etree as etree_builders from .. import _ihatexml import lxml.etree as etree fullTree = True tag_regexp = re.compile("{([^}]*)}(.*)") comment_type = etree.Comment("asd").tag class DocumentType(object): def __init__(self, name, publicId, systemId): self.name = name self.publicId = publicId self.systemId = systemId class Document(object): def __init__(self): self._elementTree = None self._childNodes = [] def appendChild(self, element): self._elementTree.getroot().addnext(element._element) def _getChildNodes(self): return self._childNodes childNodes = property(_getChildNodes) def testSerializer(element): rv = [] infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True) def serializeElement(element, indent=0): if not hasattr(element, "tag"): if hasattr(element, "getroot"): # Full tree case rv.append("#document") if element.docinfo.internalDTD: if not (element.docinfo.public_id or element.docinfo.system_url): dtd_str = "" % element.docinfo.root_name else: dtd_str = """""" % ( element.docinfo.root_name, element.docinfo.public_id, element.docinfo.system_url) rv.append("|%s%s" % (' ' * (indent + 2), dtd_str)) next_element = element.getroot() while next_element.getprevious() is not None: next_element = next_element.getprevious() while next_element is not None: serializeElement(next_element, indent + 2) next_element = next_element.getnext() elif isinstance(element, str) or isinstance(element, bytes): # Text in a fragment assert isinstance(element, str) or sys.version_info[0] == 
2 rv.append("|%s\"%s\"" % (' ' * indent, element)) else: # Fragment case rv.append("#document-fragment") for next_element in element: serializeElement(next_element, indent + 2) elif element.tag == comment_type: rv.append("|%s" % (' ' * indent, element.text)) if hasattr(element, "tail") and element.tail: rv.append("|%s\"%s\"" % (' ' * indent, element.tail)) else: assert isinstance(element, etree._Element) nsmatch = etree_builders.tag_regexp.match(element.tag) if nsmatch is not None: ns = nsmatch.group(1) tag = nsmatch.group(2) prefix = constants.prefixes[ns] rv.append("|%s<%s %s>" % (' ' * indent, prefix, infosetFilter.fromXmlName(tag))) else: rv.append("|%s<%s>" % (' ' * indent, infosetFilter.fromXmlName(element.tag))) if hasattr(element, "attrib"): attributes = [] for name, value in element.attrib.items(): nsmatch = tag_regexp.match(name) if nsmatch is not None: ns, name = nsmatch.groups() name = infosetFilter.fromXmlName(name) prefix = constants.prefixes[ns] attr_string = "%s %s" % (prefix, name) else: attr_string = infosetFilter.fromXmlName(name) attributes.append((attr_string, value)) for name, value in sorted(attributes): rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) if element.text: rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) indent += 2 for child in element: serializeElement(child, indent) if hasattr(element, "tail") and element.tail: rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) serializeElement(element, 0) return "\n".join(rv) def tostring(element): """Serialize an element and its child nodes to a string""" rv = [] def serializeElement(element): if not hasattr(element, "tag"): if element.docinfo.internalDTD: if element.docinfo.doctype: dtd_str = element.docinfo.doctype else: dtd_str = "" % element.docinfo.root_name rv.append(dtd_str) serializeElement(element.getroot()) elif element.tag == comment_type: rv.append("" % (element.text,)) else: # This is assumed to be an ordinary element if not element.attrib: 
rv.append("<%s>" % (element.tag,)) else: attr = " ".join(["%s=\"%s\"" % (name, value) for name, value in element.attrib.items()]) rv.append("<%s %s>" % (element.tag, attr)) if element.text: rv.append(element.text) for child in element: serializeElement(child) rv.append("" % (element.tag,)) if hasattr(element, "tail") and element.tail: rv.append(element.tail) serializeElement(element) return "".join(rv) class TreeBuilder(base.TreeBuilder): documentClass = Document doctypeClass = DocumentType elementClass = None commentClass = None fragmentClass = Document implementation = etree def __init__(self, namespaceHTMLElements, fullTree=False): builder = etree_builders.getETreeModule(etree, fullTree=fullTree) infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True) self.namespaceHTMLElements = namespaceHTMLElements class Attributes(dict): def __init__(self, element, value=None): if value is None: value = {} self._element = element dict.__init__(self, value) # pylint:disable=non-parent-init-called for key, value in self.items(): if isinstance(key, tuple): name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) else: name = infosetFilter.coerceAttribute(key) self._element._element.attrib[name] = value def __setitem__(self, key, value): dict.__setitem__(self, key, value) if isinstance(key, tuple): name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) else: name = infosetFilter.coerceAttribute(key) self._element._element.attrib[name] = value class Element(builder.Element): def __init__(self, name, namespace): name = infosetFilter.coerceElement(name) builder.Element.__init__(self, name, namespace=namespace) self._attributes = Attributes(self) def _setName(self, name): self._name = infosetFilter.coerceElement(name) self._element.tag = self._getETreeTag( self._name, self._namespace) def _getName(self): return infosetFilter.fromXmlName(self._name) name = property(_getName, _setName) def _getAttributes(self): return 
self._attributes def _setAttributes(self, attributes): self._attributes = Attributes(self, attributes) attributes = property(_getAttributes, _setAttributes) def insertText(self, data, insertBefore=None): data = infosetFilter.coerceCharacters(data) builder.Element.insertText(self, data, insertBefore) def appendChild(self, child): builder.Element.appendChild(self, child) class Comment(builder.Comment): def __init__(self, data): data = infosetFilter.coerceComment(data) builder.Comment.__init__(self, data) def _setData(self, data): data = infosetFilter.coerceComment(data) self._element.text = data def _getData(self): return self._element.text data = property(_getData, _setData) self.elementClass = Element self.commentClass = Comment # self.fragmentClass = builder.DocumentFragment base.TreeBuilder.__init__(self, namespaceHTMLElements) def reset(self): base.TreeBuilder.reset(self) self.insertComment = self.insertCommentInitial self.initial_comments = [] self.doctype = None def testSerializer(self, element): return testSerializer(element) def getDocument(self): if fullTree: return self.document._elementTree else: return self.document._elementTree.getroot() def getFragment(self): fragment = [] element = self.openElements[0]._element if element.text: fragment.append(element.text) fragment.extend(list(element)) if element.tail: fragment.append(element.tail) return fragment def insertDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] if not name: warnings.warn("lxml cannot represent empty doctype", DataLossWarning) self.doctype = None else: coercedName = self.infosetFilter.coerceElement(name) if coercedName != name: warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning) doctype = self.doctypeClass(coercedName, publicId, systemId) self.doctype = doctype def insertCommentInitial(self, data, parent=None): assert parent is None or parent is self.document assert self.document._elementTree is None 
self.initial_comments.append(data) def insertCommentMain(self, data, parent=None): if (parent == self.document and self.document._elementTree.getroot()[-1].tag == comment_type): warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning) super(TreeBuilder, self).insertComment(data, parent) def insertRoot(self, token): """Create the document root""" # Because of the way libxml2 works, it doesn't seem to be possible to # alter information like the doctype after the tree has been parsed. # Therefore we need to use the built-in parser to create our initial # tree, after which we can add elements like normal docStr = "" if self.doctype: assert self.doctype.name docStr += "= 0 and sysid.find('"') >= 0: warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning) sysid = sysid.replace("'", 'U00027') if sysid.find("'") >= 0: docStr += '"%s"' % sysid else: docStr += "'%s'" % sysid else: docStr += "''" docStr += ">" if self.doctype.name != token["name"]: warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning) docStr += "" root = etree.fromstring(docStr) # Append the initial comments: for comment_token in self.initial_comments: comment = self.commentClass(comment_token["data"]) root.addprevious(comment._element) # Create the root document and add the ElementTree to it self.document = self.documentClass() self.document._elementTree = root.getroottree() # Give the root element the right name name = token["name"] namespace = token.get("namespace", self.defaultNamespace) if namespace is None: etree_tag = name else: etree_tag = "{%s}%s" % (namespace, name) root.tag = etree_tag # Add the root element to the internal child/open data structures root_element = self.elementClass(name, namespace) root_element._element = root self.document._childNodes.append(root_element) self.openElements.append(root_element) # Reset to the default insert comment function 
self.insertComment = self.insertCommentMain PK.e[99html5lib/treebuilders/etree.pycnu[ abc@`sddlmZmZmZddlmZddlZddlmZddlm Z ddlm Z dd l m Z dd l m Z ejd Zed Ze eZdS( i(tabsolute_importtdivisiontunicode_literals(t text_typeNi(tbasei(t _ihatexml(t constants(t namespaces(tmoduleFactoryFactoryu {([^}]*)}(.*)c `sjdjdtjffdYdffdYdffdYdffd Yd ffd Yfd fd }dtjf fdY}tS(NuasdtElementc`seZdfdZdZdZdZeeeZdZ dZ ee e Z dZ dZ ee e ZdZd ZeeeZd Zd Zd Zd ZddZdZdZRS(c`s||_||_j|j|||_|dkrUtd|jf|_n|j|jf|_d|_g|_ g|_ dS(Nuhtml( t_namet _namespaceR t _getETreeTagt_elementtNoneRt nameTupletparentt _childNodest_flags(tselftnamet namespace(t ElementTree(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt__init__s     cS`s)|dkr|}nd||f}|S(Nu{%s}%s(R(RRRt etree_tag((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyR #s  cS`s+||_|j|j|j|j_dS(N(R R R R ttag(RR((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_setName*s cS`s|jS(N(R (R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_getName.scS`s+||_|j|j|j|j_dS(N(R R R R R(RR((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt _setNamespace3s cS`s|jS(N(R (R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt _getNamespace7scS`s |jjS(N(R tattrib(R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_getAttributes<scS`sx-t|jjjD]}|jj|=qWx]|jD]O\}}t|trsd|d|df}n|}|jj||q=WdS(Nu{%s}%sii(tlistR Rtkeystitemst isinstancettupletset(Rt attributestkeytvalueR((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_setAttributes?scS`s|jS(N(R(R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_getChildNodesMscS`s2|j2g|_x|D]}|j|qWdS(N(R Rt insertChild(RR(telement((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_setChildNodesPs  cS`st|jjpt|jS(u,Return true if the node has children or 
text(tboolR ttexttlen(R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt hasContentXscS`s0|jj||jj|j||_dS(N(RtappendR R(Rtnode((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt appendChild\scS`s>t|jj|j}|jj||j||_dS(N(R R tindextinsertR(RR3trefNodeR5((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt insertBeforeascS`s0|jj||jj|jd|_dS(N(RtremoveR RR(RR3((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt removeChildfscS`s-t|js?|jjs*d|j_n|jj|7_n|dkr|jdjsnd|jd_n|jdj|7_nt|j}|j|j}|dkr|j|djsd|j|d_n|j|dj|7_n-|jjsd|j_n|jj|7_dS(Nuiii(R0R R/RttailR R5(RtdataR8tchildrenR5((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt insertTextks"    cS`sLt||j|j}x*|jjD]\}}||j|R@RC((R(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyR s*               tCommentc`s8eZfdZdZdZeeeZRS(c`s1j||_d|_g|_g|_dS(N(RHR RRRR(RR<(R(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRs  cS`s |jjS(N(R R/(R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_getDatascS`s||j_dS(N(R R/(RR(((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_setDatas(RERFRRIRJRGR<((R(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRHs  t DocumentTypec`sYeZfdZdZdZeeeZdZdZeeeZ RS(c`s2j|d||j_||_||_dS(Nu (RR R/tpublicIdtsystemId(RRRLRM(R (sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRs  cS`s|jjddS(NupublicIdu(R tget(R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt _getPublicIdscS`s&|dk r"|jjd|ndS(NupublicId(RR R%(RR(((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt _setPublicIds cS`s|jjddS(NusystemIdu(R RN(R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt _getSystemIdscS`s&|dk r"|jjd|ndS(NusystemId(RR 
R%(RR(((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt _setSystemIds ( RERFRRORPRGRLRQRRRM((R (sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRKs    tDocumentc`seZfdZRS(c`sj|ddS(Nu DOCUMENT_ROOT(R(R(R (sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRs(RERFR((R (sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRSstDocumentFragmentc`seZfdZRS(c`sj|ddS(NuDOCUMENT_FRAGMENT(R(R(R (sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRs(RERFR((R (sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRTsc`s8gdfd|ddjS(Nic `st|ds|j}n|jdkr|jdsK|jdr|jdp]d}|jdprd}jd|j||fq0jd|jfn|jdkrMjd |jdk rjd d |d |jfn|jdk rtd nt|dr0t |j r0tdq0n|jkr}jdd ||jfnt |jt st dt|j|jftj|j}|dkr|j}n/|j\}}tj|}d||f}jdd ||ft|drg}x|j jD]r\}} tj|}|dk r|j\}}tj|}d||f} n|} |j| | fqJWx?t|D].\}} jdd |d || fqWn|jr0jd d |d |jfn|d 7}x|D]} | |qAW|jrjd d |d |jfndS(Nutagu upublicIdusystemIduuu u DOCUMENT_ROOTu #documentu|%s"%s"u iuDocument node cannot have tailuattribu$Document node cannot have attributesu|%suExpected unicode, got %s, %su%s %su|%s<%s>u |%s%s="%s"(thasattrtgetrootRRNR2R/RR;t TypeErrorR0RR#RtAssertionErrorR?t tag_regexptmatchtgroupsRtprefixesR"tsorted( R,tindentRLRMtnsmatchRtnstprefixR&R(t attr_stringtchild(tElementTreeCommentTypetrvtserializeElement(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRfs^  %!     
) %   u (tjoin(R,(Rd(ReRfsK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyttestSerializers7 c`sDgtjfd|djS(u4Serialize an element and its child nodes to a stringc`smt|jr!|j}n|jdkr|jdsN|jdr|jdp`d}|jdpud}jd|j||fqMjd|jfn|jdkrL|jdk rj|jn|jdk rt dnt |d r.t |j r.t d nx|D]}|q5Wn|jkrujd |jfn|j sjd j |jfn^d jg|j jD]%\}}dj ||f^q}jd|j|f|jrj|jnx|D]}|q"Wjd|jf|jrij|jndS(Nu upublicIdusystemIduuu u DOCUMENT_ROOTuDocument node cannot have tailuattribu$Document node cannot have attributesu u<%s>u u%s="%s"u<%s %s>u(R#RRVRRNR2R/RR;RWRUR0Rt fromXmlNameRgR"(R,RLRMRcRR(tattr(RRdtfilterReRf(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRfs@   # ;   u(Rt InfosetFilterRg(R,(RRd(RkReRfsK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyttostrings  - t TreeBuilderc`sSeZZZZZZZfdZfdZ dZ RS(c`s |S(N((RR,(Rh(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRhAsc`sPr|jjS|jdk r9|jjjd|jS|jjjdSdS(Nu{%s}htmluhtml(tdocumentR tdefaultNamespaceRtfind(R(tfullTree(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt getDocumentDs   cS`stjj|jS(N(RRnt getFragmentR (R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRtNs( RERFt documentClasst doctypeClasst elementClasst commentClasst fragmentClasstimplementationRhRsRt((RHRSRTRKR tElementTreeImplementationRrRh(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRn9s (RHRRRBRntlocals(R{RrRmRn(( RHRSRTRKR RRdR{RrRhsK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pytgetETreeBuilders~>64(t __future__RRRtpip._vendor.sixRtretRRRRt_utilsRtcompileRYtFalseR}tgetETreeModule(((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyts  DPK.e[:N77html5lib/treebuilders/base.pycnu[ abc@`shddlmZmZmZddlmZddlmZmZm Z dZ ie ee fd6e eee ddfgBe fd6e eee ddfe ddfgBe fd 6e e ddfe dd fge fd 6e e dd fe dd fgefd 
6ZdefdYZdefdYZdefdYZdS(i(tabsolute_importtdivisiontunicode_literals(t text_typei(tscopingElementsttableInsertModeElementst namespacesuhtmlubuttonuoluululistutableuoptgroupuoptionuselecttNodecB`seeZdZdZdZdZd dZdZdZ dZ dZ d Z RS( cC`s:||_d|_d|_i|_g|_g|_dS(u6Node representing an item in the tree. name - The tag name associated with the node parent - The parent of the current node (or None for the document node) value - The value of the current node (applies to text nodes and comments attributes - a dict holding name, value pairs for attributes of the node childNodes - a list of child nodes of the current node. This must include all elements but not necessarily other node types _flags - A list of miscellaneous flags that can be set on the node N(tnametNonetparenttvaluet attributest childNodest_flags(tselfR((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt__init__s     cC`sadjg|jjD]\}}d||f^q}|rRd|j|fSd|jSdS(Nu u%s="%s"u<%s %s>u<%s>(tjoinR titemsR(RRR t attributesStr((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt__str__*s  2cC`s d|jS(Nu<%s>(R(R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt__repr__3scC`s tdS(u3Insert node as a child of the current node N(tNotImplementedError(Rtnode((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt appendChild6scC`s tdS(uInsert data as text in the current node, positioned before the start of node insertBefore or to the end of the node's text. N(R(Rtdatat insertBefore((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt insertText;scC`s tdS(uInsert node as a child of the current node, before refNode in the list of child nodes. 
Raises ValueError if refNode is not a child of the current nodeN(R(RRtrefNode((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRAscC`s tdS(u:Remove node from the children of the current node N(R(RR((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt removeChildGscC`s.x|jD]}|j|q Wg|_dS(uMove all the children of the current node to newParent. This is needed so that trees that don't store text as nodes move the text in the correct way N(R R(Rt newParenttchild((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pytreparentChildrenLscC`s tdS(uReturn a shallow copy of the current node i.e. a node with the same name and attributes but with no parent or child nodes N(R(R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt cloneNodeVscC`s tdS(uFReturn true if the node has children or text, false otherwise N(R(R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt hasContent\sN( t__name__t __module__RRRRR RRRR R!R"(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRs       tActiveFormattingElementscB`seZdZdZRS(cC`sd}|tkrxj|dddD]R}|tkr<Pn|j||r[|d7}n|dkr&|j|Pq&q&Wntj||dS(Niiii(tMarkert nodesEqualtremovetlisttappend(RRt equalCounttelement((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyR*cs      cC`s0|j|jkstS|j|jks,tStS(N(t nameTupletFalseR tTrue(Rtnode1tnode2((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyR'ps (R#R$R*R'(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyR%bs t TreeBuildercB`seZdZdZdZdZdZdZdZ dZ ddZ dZ dZ dZdZdZdd Zd Zd Zd ZeeeZd ZdZddZdZddZdZdZdZRS(uBase treebuilder implementation documentClass - the class to use for the bottommost node of a document elementClass - the class to use for HTML Elements commentClass - the class to use for comments doctypeClass - the class to use for doctypes cC`s)|rd|_n 
d|_|jdS(Nuhttp://www.w3.org/1999/xhtml(tdefaultNamespaceR treset(RtnamespaceHTMLElements((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRs  cC`sCg|_t|_d|_d|_t|_|j|_ dS(N( t openElementsR%tactiveFormattingElementsR t headPointert formPointerR.tinsertFromTablet documentClasstdocument(R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyR4s      cC`st|d}|sOt|tr7td|f}nt|tsOtnt|\}}x^t|jD]M}|r||krt S| r|j |krt S||j |kArot SqoWt stdS(Nu nameTupleuhtml( thasattrt isinstanceRRttupletAssertionErrortlistElementsMaptreversedR6R/R-R.(Rttargettvariantt exactNodet listElementstinvertR((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pytelementInScopescC`s'|js dSt|jd}|j|}|tksH||jkrLdSxL|tkr||jkr|dkrd}Pn|d8}|j|}qOWxtr"|d7}|j|}|j}|jidd6|jd6|jd6|j d6}||j|<||jdkrPqqWdS( NiiiuStartTagutypeunameu namespaceudata( R7tlenR&R6R/R!t insertElementRt namespaceR (RtitentrytcloneR,((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt#reconstructActiveFormattingElementss.           cC`s>|jj}x(|jr9|tkr9|jj}qWdS(N(R7tpopR&(RRM((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pytclearActiveFormattingElementsscC`sHxA|jdddD])}|tkr-Pq|j|kr|SqWtS(uCheck if an element exists between the end of the active formatting elements and the last marker. 
If it does, return it, else return falseNi(R7R&RR.(RRtitem((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt!elementInActiveFormattingElementss  cC`s3|j|}|jj||jj|dS(N(t createElementR6R*R<R(RttokenR,((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt insertRootscC`sG|d}|d}|d}|j|||}|jj|dS(NunameupublicIdusystemId(t doctypeClassR<R(RRURtpublicIdtsystemIdtdoctype((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt insertDoctypes    cC`s:|dkr|jd}n|j|j|ddS(Niudata(R R6Rt commentClass(RRUR ((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt insertComment s cC`sB|d}|jd|j}|j||}|d|_|S(u.Create an element but don't insert it anywhereunameu namespaceudata(tgetR3t elementClassR (RRURRKR,((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRTs   cC`s|jS(N(t_insertFromTable(R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt_getInsertFromTablescC`s.||_|r|j|_n |j|_dS(usSwitch the function used to insert an element from the normal one to the misnested table one and back againN(R`tinsertElementTableRJtinsertElementNormal(RR ((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt_setInsertFromTables cC`s|d}t|ts)td||jd|j}|j||}|d|_|jdj||jj ||S(NunameuElement %s not unicodeu namespaceudatai( R>RR@R^R3R_R R6RR*(RRURRKR,((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRc$s  cC`s|j|}|jdjtkr2|j|S|j\}}|dkr`|j|n|j|||jj ||S(u-Create an element and insert it into the treeiN( RTR6RRRctgetTableMisnestedNodePositionR RRR*(RRUR,R R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRb.s  cC`s{|dkr|jd}n|j sE|jrU|jdjtkrU|j|n"|j\}}|j||dS(uInsert text data.iN(R R6R:RRRRe(RRR R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyR>s   cC`sd}d}d}x7|jdddD]}|jdkr)|}Pq)q)W|r|jrm|j}|}q|j|jj|d}n |jd}||fS(usGet the foster parent element, and 
sibling to insert before (or None) when inserting a misnested table nodeNiutableii(R R6RR tindex(Rt lastTablet fosterParentRtelm((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyReMs    c C`sO|jdj}|td krK||krK|jj|j|ndS( Niuddudtuliuoptionuoptgroupupurpurt(uddudtuliuoptionuoptgroupupurpurt(R6Rt frozensetRPtgenerateImpliedEndTags(RtexcludeR((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRkgs   cC`s|jS(uReturn the final tree(R<(R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt getDocumentqscC`s$|j}|jdj||S(uReturn the final fragmenti(t fragmentClassR6R (Rtfragment((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt getFragmentus cC`s tdS(uzSerialize the subtree of node in the format required by unit tests node - the node from which to start serializingN(R(RR((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyttestSerializer|sN(R#R$t__doc__R R;R_R\RWRnRR4RHRORQRSRVR[R]RTRaRdtpropertyR:RcRbRReRkRmRpRq(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyR2zs6   .             N(t __future__RRRtpip._vendor.sixRt constantsRRRR R&RjR.tsetR/RAtobjectRR)R%R2(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyts*! 
KPK.e[VcJ9J9html5lib/treebuilders/etree.pyonu[ abc@`sddlmZmZmZddlmZddlZddlmZddlm Z ddlm Z dd l m Z dd l m Z ejd Zed Ze eZdS( i(tabsolute_importtdivisiontunicode_literals(t text_typeNi(tbasei(t _ihatexml(t constants(t namespaces(tmoduleFactoryFactoryu {([^}]*)}(.*)c `sjdjdtjffdYdffdYdffdYdffd Yd ffd Yfd fd }dtjf fdY}tS(NuasdtElementc`seZdfdZdZdZdZeeeZdZ dZ ee e Z dZ dZ ee e ZdZd ZeeeZd Zd Zd Zd ZddZdZdZRS(c`s||_||_j|j|||_|dkrUtd|jf|_n|j|jf|_d|_g|_ g|_ dS(Nuhtml( t_namet _namespaceR t _getETreeTagt_elementtNoneRt nameTupletparentt _childNodest_flags(tselftnamet namespace(t ElementTree(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt__init__s     cS`s)|dkr|}nd||f}|S(Nu{%s}%s(R(RRRt etree_tag((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyR #s  cS`s+||_|j|j|j|j_dS(N(R R R R ttag(RR((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_setName*s cS`s|jS(N(R (R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_getName.scS`s+||_|j|j|j|j_dS(N(R R R R R(RR((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt _setNamespace3s cS`s|jS(N(R (R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt _getNamespace7scS`s |jjS(N(R tattrib(R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_getAttributes<scS`sx-t|jjjD]}|jj|=qWx]|jD]O\}}t|trsd|d|df}n|}|jj||q=WdS(Nu{%s}%sii(tlistR Rtkeystitemst isinstancettupletset(Rt attributestkeytvalueR((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_setAttributes?scS`s|jS(N(R(R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_getChildNodesMscS`s2|j2g|_x|D]}|j|qWdS(N(R Rt insertChild(RR(telement((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_setChildNodesPs  cS`st|jjpt|jS(u,Return true if the node has children or text(tboolR 
ttexttlen(R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt hasContentXscS`s0|jj||jj|j||_dS(N(RtappendR R(Rtnode((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt appendChild\scS`s>t|jj|j}|jj||j||_dS(N(R R tindextinsertR(RR3trefNodeR5((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt insertBeforeascS`s0|jj||jj|jd|_dS(N(RtremoveR RR(RR3((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt removeChildfscS`s-t|js?|jjs*d|j_n|jj|7_n|dkr|jdjsnd|jd_n|jdj|7_nt|j}|j|j}|dkr|j|djsd|j|d_n|j|dj|7_n-|jjsd|j_n|jj|7_dS(Nuiii(R0R R/RttailR R5(RtdataR8tchildrenR5((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt insertTextks"    cS`sLt||j|j}x*|jjD]\}}||j|R@RC((R(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyR s*               tCommentc`s8eZfdZdZdZeeeZRS(c`s1j||_d|_g|_g|_dS(N(RHR RRRR(RR<(R(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRs  cS`s |jjS(N(R R/(R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_getDatascS`s||j_dS(N(R R/(RR(((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt_setDatas(RERFRRIRJRGR<((R(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRHs  t DocumentTypec`sYeZfdZdZdZeeeZdZdZeeeZ RS(c`s2j|d||j_||_||_dS(Nu (RR R/tpublicIdtsystemId(RRRLRM(R (sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRs  cS`s|jjddS(NupublicIdu(R tget(R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt _getPublicIdscS`s&|dk r"|jjd|ndS(NupublicId(RR R%(RR(((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt _setPublicIds cS`s|jjddS(NusystemIdu(R RN(R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt _getSystemIdscS`s&|dk r"|jjd|ndS(NusystemId(RR 
R%(RR(((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt _setSystemIds ( RERFRRORPRGRLRQRRRM((R (sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRKs    tDocumentc`seZfdZRS(c`sj|ddS(Nu DOCUMENT_ROOT(R(R(R (sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRs(RERFR((R (sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRSstDocumentFragmentc`seZfdZRS(c`sj|ddS(NuDOCUMENT_FRAGMENT(R(R(R (sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRs(RERFR((R (sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRTsc`s8gdfd|ddjS(Nic `sVt|ds|j}n|jdkr|jdsK|jdr|jdp]d}|jdprd}jd|j||fqjd|jfnM|jdkrMjd |jdk rjd d |d |jfn|jdk rtd nt|drt |j rtdqn|jkr}jdd ||jfnt j |j}|dkr|j}n/|j \}}tj|}d||f}jdd ||ft|drg}x|j jD]r\}} t j |}|dk ro|j \}}tj|}d||f} n|} |j| | fqWx?t|D].\}} jdd |d || fqWn|jrjd d |d |jfn|d 7}x|D]} | |q W|jrRjd d |d |jfndS(Nutagu upublicIdusystemIduuu u DOCUMENT_ROOTu #documentu|%s"%s"u iuDocument node cannot have tailuattribu$Document node cannot have attributesu|%su%s %su|%s<%s>u |%s%s="%s"(thasattrtgetrootRRNR2R/RR;t TypeErrorR0Rt tag_regexptmatchtgroupsRtprefixesR"tsorted( R,tindentRLRMtnsmatchRtnstprefixR&R(t attr_stringtchild(tElementTreeCommentTypetrvtserializeElement(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyResZ  %!     
) %   u (tjoin(R,(Rc(RdResK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyttestSerializers7 c`sDgtjfd|djS(u4Serialize an element and its child nodes to a stringc`smt|jr!|j}n|jdkr|jdsN|jdr|jdp`d}|jdpud}jd|j||fqMjd|jfn|jdkrL|jdk rj|jn|jdk rt dnt |d r.t |j r.t d nx|D]}|q5Wn|jkrujd |jfn|j sjd j |jfn^d jg|j jD]%\}}dj ||f^q}jd|j|f|jrj|jnx|D]}|q"Wjd|jf|jrij|jndS(Nu upublicIdusystemIduuu u DOCUMENT_ROOTuDocument node cannot have tailuattribu$Document node cannot have attributesu u<%s>u u%s="%s"u<%s %s>u(R#RRVRRNR2R/RR;RWRUR0Rt fromXmlNameRfR"(R,RLRMRbRR(tattr(RRctfilterRdRe(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRes@   # ;   u(Rt InfosetFilterRf(R,(RRc(RjRdResK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyttostrings  - t TreeBuilderc`sSeZZZZZZZfdZfdZ dZ RS(c`s |S(N((RR,(Rg(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRgAsc`sPr|jjS|jdk r9|jjjd|jS|jjjdSdS(Nu{%s}htmluhtml(tdocumentR tdefaultNamespaceRtfind(R(tfullTree(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyt getDocumentDs   cS`stjj|jS(N(RRmt getFragmentR (R((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRsNs( RERFt documentClasst doctypeClasst elementClasst commentClasst fragmentClasstimplementationRgRrRs((RHRSRTRKR tElementTreeImplementationRqRg(sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyRm9s (RHRRRBRmtlocals(RzRqRlRm(( RHRSRTRKR RRcRzRqRgsK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pytgetETreeBuilders~>64(t __future__RRRtpip._vendor.sixRtretRRRRt_utilsRtcompileRXtFalseR|tgetETreeModule(((sK/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyts  DPK.e[;;$html5lib/treebuilders/etree_lxml.pycnu[ abc@`s'dZddlmZmZmZddlZddlZddlZddlm Z ddl m Z ddlm Z dd lm Z dd lmZddlj Z eZejd Ze jd jZd efdYZdefdYZdZdZde jfdYZdS(uModule for supporting the lxml.etree 
library. The idea here is to use as much of the native library as possible, without using fragile hacks like custom element names that break between releases. The downside of this is that we cannot represent all possible trees; specifically the following are known to cause problems: Text or comments as siblings of the root element Docypes with no name When any of these things occur, we emit a DataLossWarning i(tabsolute_importtdivisiontunicode_literalsNi(tbasei(tDataLossWarning(t constants(tetree(t _ihatexmlu {([^}]*)}(.*)uasdt DocumentTypecB`seZdZRS(cC`s||_||_||_dS(N(tnametpublicIdtsystemId(tselfR R R ((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt__init__#s  (t__name__t __module__R (((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyR"stDocumentcB`s/eZdZdZdZeeZRS(cC`sd|_g|_dS(N(tNonet _elementTreet _childNodes(R ((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyR *s cC`s|jjj|jdS(N(Rtgetroottaddnextt_element(R telement((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt appendChild.scC`s|jS(N(R(R ((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt_getChildNodes1s(RRR RRtpropertyt childNodes(((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyR)s   c`sJgtjdtdfd|ddjS(NtpreventDoubleDashCommentsic `sMt|dst|dr jd|jjr|jjpL|jjsbd|jj}n%d|jj|jj|jjf}jdd|d|fn|j}x"|jdk r|j}qWx|dk r ||d|j }qWqIt |t s+t |t rqt |t sStjd dksStjd d||fqIjd x|D]}||dqWn|jtkr jd d||jft|d rI|jrIjd d||jfqIn=t |tjs$ttjj|j}|dk r|jd}|jd}tj|}jdd||j|fn'jdd|j|jft|drg}x|jjD]\} } tj| }|dk rN|j \}} j| } tj|}d|| f} nj| } |j| | fqWx?t!|D].\} } jdd|d| | fqWn|jrjd d|d|jfn|d7}x|D]} | |qWt|d rI|jrIjd d|d|jfndS(Nutagugetrootu #documentu uu|%s%su iiu|%s"%s"u#document-fragmentu|%sutailiu |%s<%s %s>u|%s<%s>uattribu%s %su |%s%s="%s"("thasattrtappendtdocinfot internalDTDt public_idt system_urlt root_nameRt 
getpreviousRtgetnextt isinstancetstrtbytestsyst version_infotAssertionErrorttagt comment_typettextttailRt_Elementtetree_builderst tag_regexptmatchtgroupRtprefixest fromXmlNametattribtitemstgroupstsorted( Rtindenttdtd_strt next_elementtnsmatchtnsR,tprefixt attributesR tvaluet attr_stringtchild(t infosetFiltertrvtserializeElement(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyRG;st      " (  $    ) %  u (Rt InfosetFiltertTruetjoin(R((RERFRGsP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyttestSerializer7s F c`s/gfd|djS(u4Serialize an element and its child nodes to a stringc`st|dsi|jjrV|jjr6|jj}nd|jj}j|n|jn|jtkrjd|j fn|j sjd|jfnUdj g|j j D]\}}d||f^q}jd|j|f|j r&j|j nx|D]}|q-Wjd|jft|d r|j rj|j ndS( Nutagu u u<%s>u u%s="%s"u<%s %s>uutail(RRR tdoctypeR#RRR,R-R.R7RJR8R/(RR<R RBtattrRD(RFRG(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyRGs*    2  u(RJ(R((RFRGsP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyttostrings t TreeBuildercB`seZeZeZd Zd ZeZ e Z e dZ dZdZdZdZdZd dZd dZdZRS( c`stjtd|tjdt|_||_dtffdYdj ffdY}dj ffdY}||_ ||_ t jj||dS( NtfullTreeRt Attributesc`s)eZdfdZfdZRS(c`s|dkri}n||_tj||xo|jD]a\}}t|trzd|dj|df}nj|}||jjj|unameuGlxml cannot represent doctype with a different name to the root elementu$udatau namespaceu{%s}%sN( RLR R+R RR REt coercePubidtfindRzR{RtreplaceRt fromstringRrRnt addpreviousRt documentClassRst getroottreeRtgettdefaultNamespaceR,RmRRRuRRq( R R}tdocStrtsysidtroott comment_tokentcommentR RXt etree_tagt root_element((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt insertRoot7sL    *        N(RRRRRR|RRmRnt fragmentClassRtimplementationtFalseR RoRKRtRyRRpRR(((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyROs L      (t__doc__t __future__RRRRztreR)tRRRRR1Rt 
lxml.etreeRIRPtcompileR2RgR,R-tobjectRRRKRNRO(((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt s$    O )PK.e[(6 6 "html5lib/treebuilders/__init__.pycnu[ abc@`sHdZddlmZmZmZddlmZiZddZ dS(uA collection of modules for building different kinds of tree from HTML documents. To create a treebuilder for a new type of tree, you need to do implement several things: 1) A set of classes for various types of elements: Document, Doctype, Comment, Element. These must implement the interface of _base.treebuilders.Node (although comment nodes have a different signature for their constructor, see treebuilders.etree.Comment) Textual content may also be implemented as another node type, or not, as your tree implementation requires. 2) A treebuilder object (called TreeBuilder by convention) that inherits from treebuilders._base.TreeBuilder. This has 4 required attributes: documentClass - the class to use for the bottommost node of a document elementClass - the class to use for HTML Elements commentClass - the class to use for comments doctypeClass - the class to use for doctypes It also has one required method: getDocument - Returns the root node of the complete document tree 3) If you wish to run the unit tests, you must also create a testSerializer method on your treebuilder which accepts a node and returns a string containing Node and its children serialized according to the format used in the unittests i(tabsolute_importtdivisiontunicode_literalsi(t default_etreecK`s|j}|tkr|dkrlddlm}|d krYddlm}|}n|j||jS|dkrddlm }|jt|sPK.e[0v::$html5lib/treebuilders/etree_lxml.pyonu[ abc@`s'dZddlmZmZmZddlZddlZddlZddlm Z ddl m Z ddlm Z dd lm Z dd lmZddlj Z eZejd Ze jd jZd efdYZdefdYZdZdZde jfdYZdS(uModule for supporting the lxml.etree library. The idea here is to use as much of the native library as possible, without using fragile hacks like custom element names that break between releases. 
The downside of this is that we cannot represent all possible trees; specifically the following are known to cause problems: Text or comments as siblings of the root element Docypes with no name When any of these things occur, we emit a DataLossWarning i(tabsolute_importtdivisiontunicode_literalsNi(tbasei(tDataLossWarning(t constants(tetree(t _ihatexmlu {([^}]*)}(.*)uasdt DocumentTypecB`seZdZRS(cC`s||_||_||_dS(N(tnametpublicIdtsystemId(tselfR R R ((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt__init__#s  (t__name__t __module__R (((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyR"stDocumentcB`s/eZdZdZdZeeZRS(cC`sd|_g|_dS(N(tNonet _elementTreet _childNodes(R ((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyR *s cC`s|jjj|jdS(N(Rtgetroottaddnextt_element(R telement((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt appendChild.scC`s|jS(N(R(R ((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt_getChildNodes1s(RRR RRtpropertyt childNodes(((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyR)s   c`sJgtjdtdfd|ddjS(NtpreventDoubleDashCommentsic `s t|ds{t|dr jd|jjr|jjpL|jjsbd|jj}n%d|jj|jj|jjf}jdd|d|fn|j}x"|jdk r|j}qWx|dk r ||d|j }qWq t |t s+t |t rIjd d||fq jd x|D]}||dq]Wn|jtkrjd d||jft|d r |jr jd d||jfq n%tjj|j}|dk rZ|jd }|jd}tj|}jdd||j|fn'jdd|j|jft|drvg}x|jjD]\} } tj| }|dk r|j\}} j| } tj|}d|| f} nj| } |j| | fqWx?t|D].\} } jdd|d| | fqAWn|jrjd d|d|jfn|d7}x|D]} | |qWt|d r |jr jd d|d|jfndS(Nutagugetrootu #documentu uu|%s%su iu|%s"%s"u#document-fragmentu|%sutailiu |%s<%s %s>u|%s<%s>uattribu%s %su |%s%s="%s"(thasattrtappendtdocinfot internalDTDt public_idt system_urlt root_nameRt getpreviousRtgetnextt isinstancetstrtbytesttagt comment_typettextttailtetree_builderst tag_regexptmatchtgroupRtprefixest fromXmlNametattribtitemstgroupstsorted( Rtindenttdtd_strt 
next_elementtnsmatchtnsR)tprefixt attributesR tvaluet attr_stringtchild(t infosetFiltertrvtserializeElement(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyRC;sp      "   $    ) %  u (Rt InfosetFiltertTruetjoin(R((RARBRCsP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyttestSerializer7s F c`s/gfd|djS(u4Serialize an element and its child nodes to a stringc`st|dsi|jjrV|jjr6|jj}nd|jj}j|n|jn|jtkrjd|j fn|j sjd|jfnUdj g|j j D]\}}d||f^q}jd|j|f|j r&j|j nx|D]}|q-Wjd|jft|d r|j rj|j ndS( Nutagu u u<%s>u u%s="%s"u<%s %s>uutail(RRR tdoctypeR#RRR)R*R+R3RFR4R,(RR8R R>tattrR@(RBRC(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyRCs*    2  u(RF(R((RBRCsP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyttostrings t TreeBuildercB`seZeZeZd Zd ZeZ e Z e dZ dZdZdZdZdZd dZd dZdZRS( c`stjtd|tjdt|_||_dtffdYdj ffdY}dj ffdY}||_ ||_ t jj||dS( NtfullTreeRt Attributesc`s)eZdfdZfdZRS(c`s|dkri}n||_tj||xo|jD]a\}}t|trzd|dj|df}nj|}||jjj|tkeyR (RA(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyR s   $c`sltj|||t|trFd|dj|df}nj|}||jjj|R (RA(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyRRs $N(RRRR RR((RA(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyRMs tElementc`seZfdZfdZfdZeeeZdZfdZeeeZ dfdZ fdZ RS(c`s;j|}jj||d|||_dS(Nt namespace(t coerceElementRSR t _attributes(R R RT(RMtbuilderRA(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyR sc`s4j||_|j|j|j|j_dS(N(RUt_namet _getETreeTagt _namespaceRR)(R R (RA(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt_setNamesc`sj|jS(N(R2RX(R (RA(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt_getNamescS`s|jS(N(RV(R ((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt_getAttributessc`s|||_dS(N(RV(R 
R=(RM(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt_setAttributessc`s)j|}jj|||dS(N(tcoerceCharactersRSt insertText(R tdatat insertBefore(RWRA(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyR`sc`sjj||dS(N(RSR(R R@(RW(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyRsN( RRR R[R\RR R]R^R=RR`R((RMRWRA(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyRSs tCommentc`sAeZfdZfdZdZeeeZRS(c`s&j|}jj||dS(N(t coerceCommentRcR (R Ra(RWRA(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyR sc`sj|}||j_dS(N(RdRR+(R Ra(RA(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt_setDatascS`s |jjS(N(RR+(R ((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt_getDatas(RRR ReRfRRa((RWRA(sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyRcs (R-tgetETreeModuleRRRDRERAtnamespaceHTMLElementsRNRSRct elementClasst commentClassRRKR (R RhRLRSRc((RMRWRAsP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyR s %"  cC`s2tjj||j|_g|_d|_dS(N(RRKtresettinsertCommentInitialt insertCommenttinitial_commentsRRH(R ((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyRks  cC`s t|S(N(RG(R R((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyRG scC`s$tr|jjS|jjjSdS(N(RLtdocumentRR(R ((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt getDocument s cC`seg}|jdj}|jr2|j|jn|jt||jra|j|jn|S(Ni(t openElementsRR+RtextendtlistR,(R tfragmentR((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt getFragments  cC`s|d}|d}|d}|s@tjdtd|_nO|jj|}||krqtjdtn|j|||}||_dS(NunameupublicIdusystemIdu#lxml cannot represent empty doctypeu%lxml cannot represent non-xml doctype(twarningstwarnRRRHRARUt doctypeClass(R ttokenR R R t 
coercedNameRH((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt insertDoctypes     cC`s|jj|dS(N(RnR(R Ratparent((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyRl,scC`s^||jkrA|jjjdjtkrAtjdtntt |j ||dS(Niu@lxml cannot represent adjacent comments beyond the root elements( RoRRR)R*RvRwRtsuperRKRm(R RaR|((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pytinsertCommentMain1sc C`sZd}|jrN|d|jj7}|jjdk sG|jjdk r|d|jj|jjped7}|jjr |jj}|jddkr|jddkrtj dt |j dd}n|jddkr|d |7}q|d |7}q|d 7}n|d 7}|jj|d krNtj dt qNn|d7}t j |}x4|jD])}|j|d}|j|jqqW|j|_|j|j_|d }|jd|j}|dkr|} nd||f} | |_|j||} || _|jjj| |jj| |j|_dS(uCreate the document rootuu unameuGlxml cannot represent doctype with a different name to the root elementu$udatau namespaceu{%s}%sN(RHR R RR RAt coercePubidtfindRvRwRtreplaceRt fromstringRnRjt addpreviousRt documentClassRot getroottreeRtgettdefaultNamespaceR)RiRRRqR~Rm( R RytdocStrtsysidtroott comment_tokentcommentR RTt etree_tagt root_element((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt insertRoot7sJ    *        N(RRRRRRxRRiRjt fragmentClassRtimplementationtFalseR RkRGRpRuR{RlR~R(((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyRKs L      (t__doc__t __future__RRRRvtretsystRRRRR-Rt lxml.etreeRERLtcompileR.RcR)R*tobjectRRRGRJRK(((sP/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyt s$    O )PK.e[;7;7html5lib/treebuilders/base.pyonu[ abc@`shddlmZmZmZddlmZddlmZmZm Z dZ ie ee fd6e eee ddfgBe fd6e eee ddfe ddfgBe fd 6e e ddfe dd fge fd 6e e dd fe dd fgefd 6ZdefdYZdefdYZdefdYZdS(i(tabsolute_importtdivisiontunicode_literals(t text_typei(tscopingElementsttableInsertModeElementst namespacesuhtmlubuttonuoluululistutableuoptgroupuoptionuselecttNodecB`seeZdZdZdZdZd dZdZdZ dZ dZ d Z RS( cC`s:||_d|_d|_i|_g|_g|_dS(u6Node representing an item in the tree. 
name - The tag name associated with the node parent - The parent of the current node (or None for the document node) value - The value of the current node (applies to text nodes and comments attributes - a dict holding name, value pairs for attributes of the node childNodes - a list of child nodes of the current node. This must include all elements but not necessarily other node types _flags - A list of miscellaneous flags that can be set on the node N(tnametNonetparenttvaluet attributest childNodest_flags(tselfR((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt__init__s     cC`sadjg|jjD]\}}d||f^q}|rRd|j|fSd|jSdS(Nu u%s="%s"u<%s %s>u<%s>(tjoinR titemsR(RRR t attributesStr((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt__str__*s  2cC`s d|jS(Nu<%s>(R(R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt__repr__3scC`s tdS(u3Insert node as a child of the current node N(tNotImplementedError(Rtnode((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt appendChild6scC`s tdS(uInsert data as text in the current node, positioned before the start of node insertBefore or to the end of the node's text. N(R(Rtdatat insertBefore((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt insertText;scC`s tdS(uInsert node as a child of the current node, before refNode in the list of child nodes. Raises ValueError if refNode is not a child of the current nodeN(R(RRtrefNode((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRAscC`s tdS(u:Remove node from the children of the current node N(R(RR((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt removeChildGscC`s.x|jD]}|j|q Wg|_dS(uMove all the children of the current node to newParent. 
This is needed so that trees that don't store text as nodes move the text in the correct way N(R R(Rt newParenttchild((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pytreparentChildrenLscC`s tdS(uReturn a shallow copy of the current node i.e. a node with the same name and attributes but with no parent or child nodes N(R(R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt cloneNodeVscC`s tdS(uFReturn true if the node has children or text, false otherwise N(R(R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt hasContent\sN( t__name__t __module__RRRRR RRRR R!R"(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRs       tActiveFormattingElementscB`seZdZdZRS(cC`sd}|tkrxj|dddD]R}|tkr<Pn|j||r[|d7}n|dkr&|j|Pq&q&Wntj||dS(Niiii(tMarkert nodesEqualtremovetlisttappend(RRt equalCounttelement((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyR*cs      cC`s0|j|jkstS|j|jks,tStS(N(t nameTupletFalseR tTrue(Rtnode1tnode2((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyR'ps (R#R$R*R'(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyR%bs t TreeBuildercB`seZdZdZdZdZdZdZdZ dZ ddZ dZ dZ dZdZdZdd Zd Zd Zd ZeeeZd ZdZddZdZddZdZdZdZRS(uBase treebuilder implementation documentClass - the class to use for the bottommost node of a document elementClass - the class to use for HTML Elements commentClass - the class to use for comments doctypeClass - the class to use for doctypes cC`s)|rd|_n d|_|jdS(Nuhttp://www.w3.org/1999/xhtml(tdefaultNamespaceR treset(RtnamespaceHTMLElements((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRs  cC`sCg|_t|_d|_d|_t|_|j|_ dS(N( t openElementsR%tactiveFormattingElementsR t headPointert formPointerR.tinsertFromTablet documentClasstdocument(R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyR4s      
cC`st|d}|s:t|tr:td|f}q:nt|\}}x^t|jD]M}|rv||krvtS| r|j|krtS||j|kArZt SqZWdS(Nu nameTupleuhtml( thasattrt isinstanceRRtlistElementsMaptreversedR6R/R-R.(Rttargettvariantt exactNodet listElementstinvertR((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pytelementInScopescC`s'|js dSt|jd}|j|}|tksH||jkrLdSxL|tkr||jkr|dkrd}Pn|d8}|j|}qOWxtr"|d7}|j|}|j}|jidd6|jd6|jd6|j d6}||j|<||jdkrPqqWdS( NiiiuStartTagutypeunameu namespaceudata( R7tlenR&R6R/R!t insertElementRt namespaceR (RtitentrytcloneR,((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt#reconstructActiveFormattingElementss.           cC`s>|jj}x(|jr9|tkr9|jj}qWdS(N(R7tpopR&(RRK((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pytclearActiveFormattingElementsscC`sHxA|jdddD])}|tkr-Pq|j|kr|SqWtS(uCheck if an element exists between the end of the active formatting elements and the last marker. If it does, return it, else return falseNi(R7R&RR.(RRtitem((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt!elementInActiveFormattingElementss  cC`s3|j|}|jj||jj|dS(N(t createElementR6R*R<R(RttokenR,((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt insertRootscC`sG|d}|d}|d}|j|||}|jj|dS(NunameupublicIdusystemId(t doctypeClassR<R(RRSRtpublicIdtsystemIdtdoctype((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt insertDoctypes    cC`s:|dkr|jd}n|j|j|ddS(Niudata(R R6Rt commentClass(RRSR ((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt insertComment s cC`sB|d}|jd|j}|j||}|d|_|S(u.Create an element but don't insert it anywhereunameu namespaceudata(tgetR3t elementClassR (RRSRRIR,((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRRs   cC`s|jS(N(t_insertFromTable(R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt_getInsertFromTablescC`s.||_|r|j|_n |j|_dS(usSwitch the function used to insert an 
element from the normal one to the misnested table one and back againN(R^tinsertElementTableRHtinsertElementNormal(RR ((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt_setInsertFromTables cC`sf|d}|jd|j}|j||}|d|_|jdj||jj||S(Nunameu namespaceudatai(R\R3R]R R6RR*(RRSRRIR,((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRa$s  cC`s|j|}|jdjtkr2|j|S|j\}}|dkr`|j|n|j|||jj ||S(u-Create an element and insert it into the treeiN( RRR6RRRatgetTableMisnestedNodePositionR RRR*(RRSR,R R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyR`.s  cC`s{|dkr|jd}n|j sE|jrU|jdjtkrU|j|n"|j\}}|j||dS(uInsert text data.iN(R R6R:RRRRc(RRR R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyR>s   cC`sd}d}d}x7|jdddD]}|jdkr)|}Pq)q)W|r|jrm|j}|}q|j|jj|d}n |jd}||fS(usGet the foster parent element, and sibling to insert before (or None) when inserting a misnested table nodeNiutableii(R R6RR tindex(Rt lastTablet fosterParentRtelm((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRcMs    c C`sO|jdj}|td krK||krK|jj|j|ndS( Niuddudtuliuoptionuoptgroupupurpurt(uddudtuliuoptionuoptgroupupurpurt(R6Rt frozensetRNtgenerateImpliedEndTags(RtexcludeR((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyRigs   cC`s|jS(uReturn the final tree(R<(R((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt getDocumentqscC`s$|j}|jdj||S(uReturn the final fragmenti(t fragmentClassR6R (Rtfragment((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyt getFragmentus cC`s tdS(uzSerialize the subtree of node in the format required by unit tests node - the node from which to start serializingN(R(RR((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyttestSerializer|sN(R#R$t__doc__R 
R;R]RZRURlRR4RFRMRORQRTRYR[RRR_RbtpropertyR:RaR`RRcRiRkRnRo(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyR2zs6   .             N(t __future__RRRtpip._vendor.sixRt constantsRRRR R&RhR.tsetR/R?tobjectRR)R%R2(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyts*! KPK.e[(6 6 "html5lib/treebuilders/__init__.pyonu[ abc@`sHdZddlmZmZmZddlmZiZddZ dS(uA collection of modules for building different kinds of tree from HTML documents. To create a treebuilder for a new type of tree, you need to do implement several things: 1) A set of classes for various types of elements: Document, Doctype, Comment, Element. These must implement the interface of _base.treebuilders.Node (although comment nodes have a different signature for their constructor, see treebuilders.etree.Comment) Textual content may also be implemented as another node type, or not, as your tree implementation requires. 2) A treebuilder object (called TreeBuilder by convention) that inherits from treebuilders._base.TreeBuilder. 
This has 4 required attributes: documentClass - the class to use for the bottommost node of a document elementClass - the class to use for HTML Elements commentClass - the class to use for comments doctypeClass - the class to use for doctypes It also has one required method: getDocument - Returns the root node of the complete document tree 3) If you wish to run the unit tests, you must also create a testSerializer method on your treebuilder which accepts a node and returns a string containing Node and its children serialized according to the format used in the unittests i(tabsolute_importtdivisiontunicode_literalsi(t default_etreecK`s|j}|tkr|dkrlddlm}|d krYddlm}|}n|j||jS|dkrddlm }|jt|sPK.e[8q--html5lib/treebuilders/dom.pycnu[ abc@`sddlmZmZmZddlmZddlmZmZddl Z ddl m Z ddl m Z dd l m Z dd lmZd ZeeZdS( i(tabsolute_importtdivisiontunicode_literals(tMutableMapping(tminidomtNodeNi(tbasei(t constants(t namespaces(tmoduleFactoryFactoryc`svdtfdYdtjffdYdtjffdY}dtS(NtAttrListcB`sPeZdZdZdZdZdZdZdZdZ RS(cS`s ||_dS(N(telement(tselfR ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt__init__scS`st|jjjS(N(titerR t attributestkeys(R ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt__iter__scS`sJt|trtn.|jjj|}||_||jj|:scS`s ||_|jj|jdS(N(tparentR t appendChild(R tnode((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR+=s cS`sH|jjj|}|r4|jj||jn|jj|dS(N(R RtcreateTextNodet insertBeforeR+(R tdataR.ttext((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt insertTextAscS`s&|jj|j|j||_dS(N(R R.R*(R R,trefNode((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR.HscS`s8|jj|jkr+|jj|jnd|_dS(N(R t parentNodet removeChildR(R*(R R,((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR4LscS`sOx?|jjrA|jj}|jj||jj|qWg|_dS(N(R t hasChildNodest firstChildR4R+t childNodes(R t 
newParenttchild((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pytreparentChildrenQs  c`s |jS(N(R (R (R (sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt getAttributesXscS`s|rxt|jD]~\}}t|tr|ddk r]|dd|d}n |d}|jj|d||q|jj||qWndS(Niu:ii(RRRRR(R tsetAttributeNSt setAttribute(R RRRt qualifiedName((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt setAttributes[s   c`s|jjtS(N(R t cloneNodetFalse(R (R$(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR@jscS`s |jjS(N(R R5(R ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt hasContentmscS`s4|jdkr td|jfS|j|jfSdS(Nuhtml(t namespaceR(RR(R ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt getNameTuplepsN(R"R#R tpropertyRCR+R(R1R.R4R:R;R?RR@RBRDt nameTuple((R R$(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR$5s         t TreeBuilderc`seZfdZfdZd fdZfdZfdZdZfdZ dZ dZ d d Z Z d ZRS( c`s+jjddd|_tj|S(N(tgetDOMImplementationtcreateDocumentR(tdomtweakreftproxy(R (tDom(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt documentClassysc`st|d}|d}|d}j}|j|||}|jj|tkrp|j|_ndS(NunameupublicIdusystemId(RHtcreateDocumentTypetdocumentR+RRJR(R ttokenRtpublicIdtsystemIdtdomimpltdoctype(RMR$(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt insertDoctype}s     c`sO|dkr0|jdkr0|jj|}n|jj||}|S(N(R(tdefaultNamespaceRJt createElementtcreateElementNS(R RRCR,(R$(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt elementClasssc`s|jj|S(N(RJt createComment(R R/(R$(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt commentClasssc`s|jjS(N(RJtcreateDocumentFragment(R (R$(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt fragmentClassscS`s|jj|jdS(N(RJR+R (R R,((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR+sc`s 
|S(N((R R (ttestSerializer(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR_scS`s|jS(N(RJ(R ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt getDocumentscS`stjj|jS(N(RRGt getFragmentR (R ((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyRascS`s|}||kr+tjj|||nwt|jdrtj|jjkrt|jj|j_|jjj tjqn|jj |jj |dS(Nu_child_node_types( RRGR1R&RJRt TEXT_NODEt_child_node_typesRtappendR+R-(R R/R*((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR1s N(R"R#RNRVR(RZR\R^R+R_R`RaR1timplementationR((RMtDomImplementationR$R_(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyRGxs     c`s?|jgdfd|ddjS(Nic `s|jtjkr|jr|js-|jrr|jp9d}|jpHd}jdd||j||fqjdd||jfqjdd|fn|jtjkrjdn|jtjkrjdn|jtj kr%jdd||j fnu|jtj krXjd d||j fnBt |d r|j dk rd tj|j |jf}n |j}jd d||f|jrg}xtt|jD]r}|jj|}|j}|j}|j } | r8d tj| |jf}n |j}|j||fqWx?t|D].\}}jd d|d||fqeWn|d7}x|jD]} | |qWdS(Nuu|%su u|%su|%su #documentu#document-fragmentu|%su|%s"%s"u namespaceURIu%s %su|%s<%s>u |%s%s="%s"i(tnodeTypeRtDOCUMENT_TYPE_NODERRRRSRdt DOCUMENT_NODEtDOCUMENT_FRAGMENT_NODEt COMMENT_NODEt nodeValueRbR&R'R(RtprefixesR%t hasAttributestrangeRRtitemRt localNametsortedR7( R tindentRRRSRRtiRRtnsR9(trvtserializeElement(sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyRwsN  !!!      ) u (t normalizetjoin(R ((RvRwsI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyR_s  . (RRRRGtlocals(RfRG((R RMRfR$R_sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyt getDomBuilders $"C(: 6(t __future__RRRt collectionsRtxml.domRRRKtRRRt_utilsR R{t getDomModule(((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyts  PK.e[.""html5lib/treebuilders/dom.pynu[from __future__ import absolute_import, division, unicode_literals from collections import MutableMapping from xml.dom import minidom, Node import weakref from . import base from .. 
import constants from ..constants import namespaces from .._utils import moduleFactoryFactory def getDomBuilder(DomImplementation): Dom = DomImplementation class AttrList(MutableMapping): def __init__(self, element): self.element = element def __iter__(self): return iter(self.element.attributes.keys()) def __setitem__(self, name, value): if isinstance(name, tuple): raise NotImplementedError else: attr = self.element.ownerDocument.createAttribute(name) attr.value = value self.element.attributes[name] = attr def __len__(self): return len(self.element.attributes) def items(self): return list(self.element.attributes.items()) def values(self): return list(self.element.attributes.values()) def __getitem__(self, name): if isinstance(name, tuple): raise NotImplementedError else: return self.element.attributes[name].value def __delitem__(self, name): if isinstance(name, tuple): raise NotImplementedError else: del self.element.attributes[name] class NodeBuilder(base.Node): def __init__(self, element): base.Node.__init__(self, element.nodeName) self.element = element namespace = property(lambda self: hasattr(self.element, "namespaceURI") and self.element.namespaceURI or None) def appendChild(self, node): node.parent = self self.element.appendChild(node.element) def insertText(self, data, insertBefore=None): text = self.element.ownerDocument.createTextNode(data) if insertBefore: self.element.insertBefore(text, insertBefore.element) else: self.element.appendChild(text) def insertBefore(self, node, refNode): self.element.insertBefore(node.element, refNode.element) node.parent = self def removeChild(self, node): if node.element.parentNode == self.element: self.element.removeChild(node.element) node.parent = None def reparentChildren(self, newParent): while self.element.hasChildNodes(): child = self.element.firstChild self.element.removeChild(child) newParent.element.appendChild(child) self.childNodes = [] def getAttributes(self): return AttrList(self.element) def 
setAttributes(self, attributes): if attributes: for name, value in list(attributes.items()): if isinstance(name, tuple): if name[0] is not None: qualifiedName = (name[0] + ":" + name[1]) else: qualifiedName = name[1] self.element.setAttributeNS(name[2], qualifiedName, value) else: self.element.setAttribute( name, value) attributes = property(getAttributes, setAttributes) def cloneNode(self): return NodeBuilder(self.element.cloneNode(False)) def hasContent(self): return self.element.hasChildNodes() def getNameTuple(self): if self.namespace is None: return namespaces["html"], self.name else: return self.namespace, self.name nameTuple = property(getNameTuple) class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable def documentClass(self): self.dom = Dom.getDOMImplementation().createDocument(None, None, None) return weakref.proxy(self) def insertDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] domimpl = Dom.getDOMImplementation() doctype = domimpl.createDocumentType(name, publicId, systemId) self.document.appendChild(NodeBuilder(doctype)) if Dom == minidom: doctype.ownerDocument = self.dom def elementClass(self, name, namespace=None): if namespace is None and self.defaultNamespace is None: node = self.dom.createElement(name) else: node = self.dom.createElementNS(namespace, name) return NodeBuilder(node) def commentClass(self, data): return NodeBuilder(self.dom.createComment(data)) def fragmentClass(self): return NodeBuilder(self.dom.createDocumentFragment()) def appendChild(self, node): self.dom.appendChild(node.element) def testSerializer(self, element): return testSerializer(element) def getDocument(self): return self.dom def getFragment(self): return base.TreeBuilder.getFragment(self).element def insertText(self, data, parent=None): data = data if parent != self: base.TreeBuilder.insertText(self, data, parent) else: # HACK: allow text nodes as children of the document node if hasattr(self.dom, 
'_child_node_types'): # pylint:disable=protected-access if Node.TEXT_NODE not in self.dom._child_node_types: self.dom._child_node_types = list(self.dom._child_node_types) self.dom._child_node_types.append(Node.TEXT_NODE) self.dom.appendChild(self.dom.createTextNode(data)) implementation = DomImplementation name = None def testSerializer(element): element.normalize() rv = [] def serializeElement(element, indent=0): if element.nodeType == Node.DOCUMENT_TYPE_NODE: if element.name: if element.publicId or element.systemId: publicId = element.publicId or "" systemId = element.systemId or "" rv.append("""|%s""" % (' ' * indent, element.name, publicId, systemId)) else: rv.append("|%s" % (' ' * indent, element.name)) else: rv.append("|%s" % (' ' * indent,)) elif element.nodeType == Node.DOCUMENT_NODE: rv.append("#document") elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE: rv.append("#document-fragment") elif element.nodeType == Node.COMMENT_NODE: rv.append("|%s" % (' ' * indent, element.nodeValue)) elif element.nodeType == Node.TEXT_NODE: rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue)) else: if (hasattr(element, "namespaceURI") and element.namespaceURI is not None): name = "%s %s" % (constants.prefixes[element.namespaceURI], element.nodeName) else: name = element.nodeName rv.append("|%s<%s>" % (' ' * indent, name)) if element.hasAttributes(): attributes = [] for i in range(len(element.attributes)): attr = element.attributes.item(i) name = attr.nodeName value = attr.value ns = attr.namespaceURI if ns: name = "%s %s" % (constants.prefixes[ns], attr.localName) else: name = attr.nodeName attributes.append((name, value)) for name, value in sorted(attributes): rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) indent += 2 for child in element.childNodes: serializeElement(child, indent) serializeElement(element, 0) return "\n".join(rv) return locals() # The actual means to get a module! 
getDomModule = moduleFactoryFactory(getDomBuilder) PK.e[G7nN N !html5lib/treebuilders/__init__.pynu["""A collection of modules for building different kinds of tree from HTML documents. To create a treebuilder for a new type of tree, you need to do implement several things: 1) A set of classes for various types of elements: Document, Doctype, Comment, Element. These must implement the interface of _base.treebuilders.Node (although comment nodes have a different signature for their constructor, see treebuilders.etree.Comment) Textual content may also be implemented as another node type, or not, as your tree implementation requires. 2) A treebuilder object (called TreeBuilder by convention) that inherits from treebuilders._base.TreeBuilder. This has 4 required attributes: documentClass - the class to use for the bottommost node of a document elementClass - the class to use for HTML Elements commentClass - the class to use for comments doctypeClass - the class to use for doctypes It also has one required method: getDocument - Returns the root node of the complete document tree 3) If you wish to run the unit tests, you must also create a testSerializer method on your treebuilder which accepts a node and returns a string containing Node and its children serialized according to the format used in the unittests """ from __future__ import absolute_import, division, unicode_literals from .._utils import default_etree treeBuilderCache = {} def getTreeBuilder(treeType, implementation=None, **kwargs): """Get a TreeBuilder class for various types of tree with built-in support treeType - the name of the tree type required (case-insensitive). Supported values are: "dom" - A generic builder for DOM implementations, defaulting to a xml.dom.minidom based implementation. "etree" - A generic builder for tree implementations exposing an ElementTree-like interface, defaulting to xml.etree.cElementTree if available and xml.etree.ElementTree if not. 
"lxml" - A etree-based builder for lxml.etree, handling limitations of lxml's implementation. implementation - (Currently applies to the "etree" and "dom" tree types). A module implementing the tree type e.g. xml.etree.ElementTree or xml.etree.cElementTree.""" treeType = treeType.lower() if treeType not in treeBuilderCache: if treeType == "dom": from . import dom # Come up with a sane default (pref. from the stdlib) if implementation is None: from xml.dom import minidom implementation = minidom # NEVER cache here, caching is done in the dom submodule return dom.getDomModule(implementation, **kwargs).TreeBuilder elif treeType == "lxml": from . import etree_lxml treeBuilderCache[treeType] = etree_lxml.TreeBuilder elif treeType == "etree": from . import etree if implementation is None: implementation = default_etree # NEVER cache here, caching is done in the etree submodule return etree.getETreeModule(implementation, **kwargs).TreeBuilder else: raise ValueError("""Unrecognised treebuilder "%s" """ % treeType) return treeBuilderCache.get(treeType) PK.e[o<o<html5lib/_ihatexml.pyonu[ abc@`sZddlmZmZmZddlZddlZddlmZdZdZ dZ dZ d Z d j ee gZd j ee d d d e e gZd j ed gZejdZejdZdZdZeddZdZdZdZdZejdZejdZejdZdefdYZ dS(i(tabsolute_importtdivisiontunicode_literalsNi(tDataLossWarningu^ [#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | [#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] | [#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] | [#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 | [#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] | [#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] | [#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] | [#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] | [#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 | [#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] | [#x0641-#x064A] | 
[#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] | [#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D | [#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] | [#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] | [#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] | [#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] | [#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] | [#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] | [#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 | [#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] | [#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] | [#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] | [#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] | [#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] | [#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] | [#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] | [#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] | [#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] | [#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] | [#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A | #x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 | #x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] | #x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] | [#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] | [#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C | #x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 | [#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] | [#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] | [#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 | 
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] | [#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B | #x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE | [#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] | [#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 | [#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] | [#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]u*[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]u [#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] | [#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 | [#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] | [#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] | #x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] | [#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] | [#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 | #x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] | [#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC | [#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] | #x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] | [#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] | [#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] | [#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] | [#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] | [#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] | #x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 | [#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] | #x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] | [#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] | [#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] | #x3099 | #x309Au  [#x0030-#x0039] | [#x0660-#x0669] | 
[#x06F0-#x06F9] | [#x0966-#x096F] | [#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] | [#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] | [#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]u} #x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 | #[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]u | u.u-u_u#x([\d|A-F]{4,4})u'\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]cC`s g|jdD]}|j^q}g}x|D]}t}xttfD]}|j|}|dk rN|jg|jD]}t |^qt |ddkr|dd|dRBRIR7RERVRSRU(((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pyR*s"       (!t __future__RRRRZR5t constantsRtbaseChart ideographictcombiningCharactertdigittextenderR"tletterR8RMR[RRRRR$RRR#R R RLRJRCtobjectR*(((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pyts2  0     PK.e[1Eeehtml5lib/_utils.pycnu[ abc@`s\ddlmZmZmZddlZddlmZddlmZyddl j j Z Wn#e k rddlj jZ nXddddd d d gZejdd koejd dkZyCedZeeesedZeeestnWn eZnXeZdefdYZdZdZdZdZdS(i(tabsolute_importtdivisiontunicode_literalsN(t ModuleType(t text_typeu default_etreeuMethodDispatcheruisSurrogatePairusurrogatePairToCodepointumoduleFactoryFactoryusupports_lone_surrogatesuPY27iiiu"\uD800"u u"\uD800"tMethodDispatchercB`s#eZdZddZdZRS(upDict with 2 special properties: On initiation, keys that are lists, sets or tuples are converted to multiple keys so accessing any one of the items in the original list-like object returns the matching value md = MethodDispatcher({("foo", "bar"):"baz"}) md["foo"] == "baz" A default value which can be set through the default attribute. 
cC`sg}xi|D]a\}}t|ttttfr[x7|D]}|j||fq;Wq |j||fq Wtj||t|t|kst d|_ dS(N( t isinstancetlistttuplet frozensettsettappendtdictt__init__tlentAssertionErrortNonetdefault(tselftitemst _dictEntriestnametvaluetitem((s?/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.pyR 4s cC`stj|||jS(N(R tgetR(Rtkey((s?/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.pyt __getitem__Cs((t__name__t __module__t__doc__R R(((s?/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.pyR's  cC`sht|dkogt|ddkogt|ddkogt|ddkogt|ddkS(Niiiiiii(Rtord(tdata((s?/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.pytisSurrogatePairJs,cC`s2dt|dddt|dd}|S(Niiiiii(R(Rtchar_val((s?/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.pytsurrogatePairToCodepointPsc`sifd}|S(Nc`sttjtdr(d|j}n d|j}t|j}y|||SWntk rt|}|||}|jj|dkri|s0    &   #  PK.e[ WGGhtml5lib/_tokenizer.pyonu[ abc@`sddlmZmZmZddlmZddlmZddl m Z ddl m Z ddl m Z m Z ddl mZmZmZdd l mZmZdd l mZdd lmZdd lmZee Zd efdYZdS(i(tabsolute_importtdivisiontunicode_literals(tunichr(tdequei(tspaceCharacters(tentities(t asciiLetterstasciiUpper2Lower(tdigitst hexDigitstEOF(t tokenTypest tagTokenTypes(treplacementCharacters(tHTMLInputStream(tTriet HTMLTokenizercB`seZdZdJdZdZdZdJedZdZ dZ dZ dZ d Z d Zd Zd Zd ZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZ dZ!dZ"dZ#d Z$d!Z%d"Z&d#Z'd$Z(d%Z)d&Z*d'Z+d(Z,d)Z-d*Z.d+Z/d,Z0d-Z1d.Z2d/Z3d0Z4d1Z5d2Z6d3Z7d4Z8d5Z9d6Z:d7Z;d8Z<d9Z=d:Z>d;Z?d<Z@d=ZAd>ZBd?ZCd@ZDdAZEdBZFdCZGdDZHdEZIdFZJdGZKdHZLdIZMRS(Ku  This class takes care of tokenizing HTML. * self.currentToken Holds the token that is currently being processed. * self.state Holds a reference to the method to be invoked... XXX * self.stream Points to HTMLInputStream object. 
cK`sbt|||_||_t|_g|_|j|_t|_d|_ t t |j dS(N(RtstreamtparsertFalset escapeFlagt lastFourCharst dataStatetstatetescapetNonet currentTokentsuperRt__init__(tselfRRtkwargs((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyR"s      cc`s}tg|_xg|jrxx6|jjrVitdd6|jjjdd6Vq!Wx|jrt|jjVqZWqWdS(u This is where the magic happens. We do our usually processing through the states and when we have a token to return we yield the token which pauses processing until the next token is requested. u ParseErrorutypeiudataN(Rt tokenQueueRRterrorsR tpoptpopleft(R((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyt__iter__1s * c %C`st}d}|r!t}d}ng}|jj}x8||krp|tk rp|j||jj}q9Wtdj||}|tkrt|}|j jit dd6dd6i|d6d 6nd |kod kns|d kr3d }|j jit dd6dd6i|d6d 6nrd|koJdknsd|kofdknsd|kodknsd|kodkns|t ddddddddddd d!d"d#d$d%d&d'd(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7d g#krQ|j jit dd6dd6i|d6d 6nyt |}WnAt k r|d8}t d |d?Bt d9|d:@B}nX|d;kr|j jit dd6d<d6|jj|n|S(=uThis function returns either U+FFFD or the character based on the decimal or hexadecimal representation. It also discards ";" if present. If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked. 
i iuu ParseErrorutypeu$illegal-codepoint-for-numeric-entityudatau charAsIntudatavarsiiiu�iiiiiiiii iiiiiiiiiiiiiiiiiii i i i i i i i i i iiiiiiiiu;u numeric-entity-without-semicolon(R R RtcharR tappendtinttjoinRR R t frozensettchrt ValueErrortunget( RtisHextallowedtradixt charStacktct charAsIntR%tv((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pytconsumeNumberEntityAs`              *  c C`sd}|jjg}|dtks]|dtddfks]|dk rt||dkrt|jj|dn|ddkrpt}|j|jj|ddkrt}|j|jjn|r|dt ks| r"|dt kr"|jj|d|j |}q7|j jit dd 6d d 6|jj|jdd j|}nxF|dtk rtjd j|sPn|j|jjqsWy,tjd j|d }t|}Wntk rd}nX|dk r|dd kr@|j jit dd 6dd 6n|dd kr|r||tks||t ks||dkr|jj|jdd j|}q7t|}|jj|j|d j||7}nK|j jit dd 6dd 6|jj|jdd j|}|r[|jd ddc|7u ParseErroru'expected-tag-name-but-got-right-bracketu Charactersu<>u?u'expected-tag-name-but-got-question-markuexpected-tag-nameu<(RR%tmarkupDeclarationOpenStateRtcloseTagOpenStateRR RRt tagNameStateR R&RR,tbogusCommentStateR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRHis6      "   " cC`s?|jj}|tkrSitdd6|d6gd6td6|_|j|_n|dkr|jj itdd6dd6|j |_n|t kr|jj itdd6d d6|jj itd d6d d6|j |_nL|jj itdd6d d6i|d6d 6|jj ||j |_tS(NuEndTagutypeunameudatau selfClosingu>u ParseErroru*expected-closing-tag-but-got-right-bracketu expected-closing-tag-but-got-eofu Charactersuu ParseErrorutypeueof-in-tag-nameudatau/uuinvalid-codepointunameu�(RR%RtbeforeAttributeNameStateRRFR R R&R RtselfClosingStartTagStateRR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRVs"        cC`su|jj}|dkr3d|_|j|_n>|jjitdd6dd6|jj||j |_t S(Nu/uu Charactersutypeu|jjitdd6dd6|jj ||j |_t S(Nu Charactersutypeuu Charactersu|jjitdd6dd6|jj||j |_t S(Nu/uu Charactersutypeu|jjitdd6dd6|jj ||j |_t S(Nu Charactersutypeuu Charactersu|jjitdd6dd6|jj ||j |_t S( Nu/uu!u Charactersutypeu|jjitdd6dd6|jj ||j |_t S(Nu Charactersutypeuu Charactersuuu ParseErroruinvalid-codepointu�( RR%R R&R RgRRRRhR RR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRes& %  "  
  " cC`s|jj}|dkr3d|_|j|_n|tkr}|jjitdd6d|d6||_|j |_n>|jjitdd6dd6|jj ||j |_t S(Nu/uu Charactersutypeu|jjitdd6dd6|jj ||j |_t S(Nu Charactersutypeuu Charactersuu Charactersutypeudatauscript(u/u>(RR%RR)R R&R RZR]tscriptDataDoubleEscapedStateRRhRR,R5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRjs" " cC`s?|jj}|dkrL|jjitdd6dd6|j|_n|dkr|jjitdd6dd6|j|_n|dkr|jjitdd6dd6|jjitdd6d d6n_|tkr|jjitdd6d d6|j |_n"|jjitdd6|d6t S( Nu-u Charactersutypeudatauuu ParseErroruinvalid-codepointu�ueof-in-script-in-script( RR%R R&R RnRRRRlR RR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRos, % " "     " cC`su|jj}|dkrU|jjitdd6dd6d|_|j|_n|jj||j |_t S(Nu/u Charactersutypeudatau( RR%R R&R RZtscriptDataDoubleEscapeEndStateRR,RlR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRn0s "  cC`s|jj}|ttdBkrz|jjitdd6|d6|jjdkrk|j |_ q|j |_ n\|t kr|jjitdd6|d6|j|7_n|jj ||j |_ tS(Nu/u>u Charactersutypeudatauscript(u/u>(RR%RR)R R&R RZR]RhRRlRR,R5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRp;s" " cC`s|jj}|tkr1|jjttnz|tkrf|jdj|dg|j|_ nE|dkr|j n,|dkr|j |_ n|dkr|j jit d d 6d d6|jdj|dg|j|_ n|d krH|j jit d d 6d d6|jdjddg|j|_ nc|tkr|j jit d d 6dd6|j|_ n&|jdj|dg|j|_ tS(Nudatauu>u/u'u"u=uu/uu ParseErrorutypeuinvalid-codepointu�u'u"uudatauu/uu ParseErrorutypeuinvalid-codepointu�u'u"uu ParseErrorutypeu.expected-attribute-value-but-got-right-bracketudatauuinvalid-codepointiiu�u=u               cC`s|jj}|dkr*|j|_n|dkrF|jdn|dkr|jjitdd6dd6|jddd cd 7u"u'u=uu"u'u=u|jj it dd6dd6|jj ||j|_t S(Nu>u/u ParseErrorutypeu$unexpected-EOF-after-attribute-valueudatau*unexpected-character-after-attribute-value(RR%RRXRRFRYR R R&R R,RR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyR{ s"        cC`s|jj}|dkr5t|jd<|jn|tkr|jjitdd6dd6|jj ||j |_ n>|jjitdd6dd6|jj ||j |_ tS(Nu>u selfClosingu ParseErrorutypeu#unexpected-EOF-after-solidus-in-tagudatau)unexpected-character-after-solidus-in-tag( RR%R5RRFR R R&R 
R,RRRX(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRY4s       cC`sc|jjd}|jdd}|jjitdd6|d6|jj|j|_t S(Nu>uu�uCommentutypeudata( RRItreplaceR R&R R%RRR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRWFs   cC`sB|jjg}|ddkrv|j|jj|ddkritdd6dd6|_|j|_tSnw|ddkr(t}xPdd d!d"d#d$fD]6}|j|jj|d|krt}PqqW|ritdd6dd6dd6dd6td6|_|j |_tSn|ddkr|j dk r|j j j r|j j j dj|j j jkrt}xPd dddddgD]6}|j|jj|d|krt}PqqW|r|j|_tSn|jjitdd6dd6x |r1|jj|jqW|j|_tS(%Niu-uCommentutypeuudatauduDuouOucuCutuTuyuYupuPueuEuDoctypeunameupublicIdusystemIducorrectu[uAu ParseErroruexpected-dashes-or-doctype(uduD(uouO(ucuC(utuT(uyuY(upuP(ueuE(RR%R&R RtcommentStartStateRR5RRt doctypeStateRttreet openElementst namespacetdefaultNamespacetcdataSectionStateR R,R"RW(RR0tmatchedtexpected((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRTUsR    %    cC`s1|jj}|dkr*|j|_n|dkrn|jjitdd6dd6|jdcd7uincorrect-commentueof-in-comment( RR%tcommentStartDashStateRR R&R RRR t commentStateR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyR}s(        cC`s5|jj}|dkr*|j|_n|dkrn|jjitdd6dd6|jdcd7uincorrect-commentueof-in-comment( RR%tcommentEndStateRR R&R RRR RR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs(        cC`s|jj}|dkr*|j|_n|dkrn|jjitdd6dd6|jdcd7uu ParseErrorutypeuinvalid-codepointudatau--�u!u,unexpected-bang-after-double-dash-in-commentu-u,unexpected-dash-after-double-dash-in-commentueof-in-comment-double-dashuunexpected-char-in-commentu--( RR%R R&RRRR RtcommentEndBangStateR R5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs6           cC`s2|jj}|dkr=|jj|j|j|_n|dkrk|jdcd7<|j|_n|dkr|jjitdd6dd6|jdcd 7<|j |_ns|t kr |jjitdd6d d6|jj|j|j|_n#|jdcd|7<|j |_t S( Nu>u-udatau--!uu ParseErrorutypeuinvalid-codepointu--!�ueof-in-comment-end-bang-state( RR%R R&RRRRR RR R5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs(       cC`s|jj}|tkr*|j|_n|tkr|jjitdd6dd6t |j 
d<|jj|j |j |_n>|jjitdd6dd6|jj ||j|_t S(Nu ParseErrorutypeu!expected-doctype-name-but-got-eofudataucorrectuneed-space-after-doctype(RR%RtbeforeDoctypeNameStateRR R R&R RRRR,R5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyR~ s      cC`s?|jj}|tkrn|dkr{|jjitdd6dd6t|jd<|jj|j|j|_ n|dkr|jjitdd6dd6d |jd <|j |_ nv|t kr"|jjitdd6d d6t|jd<|jj|j|j|_ n||jd <|j |_ t S( Nu>u ParseErrorutypeu+expected-doctype-name-but-got-right-bracketudataucorrectuuinvalid-codepointu�unameu!expected-doctype-name-but-got-eof( RR%RR R&R RRRRtdoctypeNameStateR R5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs.            cC`ss|jj}|tkrG|jdjt|jd<|j|_n(|dkr|jdjt|jd<|jj |j|j |_n|dkr|jj it dd6dd6|jdcd7<|j |_n|t kr\|jj it dd6d d6t|jd <|jdjt|jd<|jj |j|j |_n|jdc|7uu ParseErrorutypeuinvalid-codepointudatau�ueof-in-doctype-nameucorrect(RR%RRRDRtafterDoctypeNameStateRR R&RR RR RR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyR6s,       cC`s|jj}|tkrn|dkrL|jj|j|j|_n|tkrt |jd<|jj ||jjit dd6dd6|jj|j|j|_n9|dkr)t }xBd d!d"d#d$fD]+}|jj}||krt }PqqW|r|j |_t Snp|d%krt }xBd&d'd(d)d*fD]+}|jj}||krQt }PqQqQW|r|j|_t Sn|jj ||jjit dd6dd6i|d6d6t |jd<|j|_t S(+Nu>ucorrectu ParseErrorutypeueof-in-doctypeudataupuPuuuUubuBuluLuiuIucuCusuSuyuYutuTueuEumuMu*expected-space-or-right-bracket-in-doctypeudatavars(upuP(uuuU(ubuB(uluL(uiuI(ucuC(usuS(uyuY(usuS(utuT(ueuE(umuM(RR%RR R&RRRR RR,R R5tafterDoctypePublicKeywordStatetafterDoctypeSystemKeywordStatetbogusDoctypeState(RRJRR((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyROsT               cC`s|jj}|tkr*|j|_n|d krw|jjitdd6dd6|jj||j|_ny|t kr|jjitdd6dd6t |j d<|jj|j |j |_n|jj||j|_t S( Nu'u"u ParseErrorutypeuunexpected-char-in-doctypeudataueof-in-doctypeucorrect(u'u"(RR%Rt"beforeDoctypePublicIdentifierStateRR R&R R,R RRRR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs"       cC`sg|jj}|tkrnE|dkrFd|jd<|j|_n|dkrnd|jd<|j|_n|dkr|jjit dd6dd 6t |jd <|jj|j|j 
|_n|t kr(|jjit dd6d d 6t |jd <|jj|j|j |_n;|jjit dd6d d 6t |jd <|j |_tS( Nu"uupublicIdu'u>u ParseErrorutypeuunexpected-end-of-doctypeudataucorrectueof-in-doctypeuunexpected-char-in-doctype(RR%RRt(doctypePublicIdentifierDoubleQuotedStateRt(doctypePublicIdentifierSingleQuotedStateR R&R RRR RR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs4              cC`s?|jj}|dkr*|j|_n|dkrn|jjitdd6dd6|jdcd7uunexpected-end-of-doctypeucorrectueof-in-doctype( RR%t!afterDoctypePublicIdentifierStateRR R&R RRRR R5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs*         cC`s?|jj}|dkr*|j|_n|dkrn|jjitdd6dd6|jdcd7uunexpected-end-of-doctypeucorrectueof-in-doctype( RR%RRR R&R RRRR R5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs*         cC`s|jj}|tkr*|j|_nZ|dkrX|jj|j|j|_n,|dkr|jjit dd6dd6d|jd<|j |_n|d kr|jjit dd6dd6d|jd<|j |_n|t krI|jjit dd6d d6t |jd <|jj|j|j|_n;|jjit dd6dd6t |jd <|j|_tS( Nu>u"u ParseErrorutypeuunexpected-char-in-doctypeudatauusystemIdu'ueof-in-doctypeucorrect(RR%Rt-betweenDoctypePublicAndSystemIdentifiersStateRR R&RRR t(doctypeSystemIdentifierDoubleQuotedStatet(doctypeSystemIdentifierSingleQuotedStateR RRR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs6              cC`s8|jj}|tkrn|dkrL|jj|j|j|_n|dkrtd|jd<|j|_n|dkrd|jd<|j |_n|t kr|jjit dd6dd 6t |jd <|jj|j|j|_n;|jjit dd6d d 6t |jd <|j |_tS( Nu>u"uusystemIdu'u ParseErrorutypeueof-in-doctypeudataucorrectuunexpected-char-in-doctype(RR%RR R&RRRRRR R RRR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs.            
cC`s|jj}|tkr*|j|_n|d krw|jjitdd6dd6|jj||j|_ny|t kr|jjitdd6dd6t |j d<|jj|j |j |_n|jj||j|_t S( Nu'u"u ParseErrorutypeuunexpected-char-in-doctypeudataueof-in-doctypeucorrect(u'u"(RR%Rt"beforeDoctypeSystemIdentifierStateRR R&R R,R RRRR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs"       cC`sg|jj}|tkrnE|dkrFd|jd<|j|_n|dkrnd|jd<|j|_n|dkr|jjit dd6dd 6t |jd <|jj|j|j |_n|t kr(|jjit dd6d d 6t |jd <|jj|j|j |_n;|jjit dd6dd 6t |jd <|j |_tS( Nu"uusystemIdu'u>u ParseErrorutypeuunexpected-char-in-doctypeudataucorrectueof-in-doctype(RR%RRRRRR R&R RRR RR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyR/s4              cC`s?|jj}|dkr*|j|_n|dkrn|jjitdd6dd6|jdcd7uunexpected-end-of-doctypeucorrectueof-in-doctype( RR%t!afterDoctypeSystemIdentifierStateRR R&R RRRR R5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRLs*         cC`s?|jj}|dkr*|j|_n|dkrn|jjitdd6dd6|jdcd7uunexpected-end-of-doctypeucorrectueof-in-doctype( RR%RRR R&R RRRR R5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRds*         cC`s|jj}|tkrn|dkrL|jj|j|j|_n|tkr|jjit dd6dd6t |jd<|jj|j|j|_n.|jjit dd6dd6|j |_t S(Nu>u ParseErrorutypeueof-in-doctypeudataucorrectuunexpected-char-in-doctype( RR%RR R&RRRR R RRR5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyR|s        cC`s|jj}|dkr=|jj|j|j|_n>|tkr{|jj||jj|j|j|_nt S(Nu>( RR%R R&RRRR R,R5(RRJ((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs  cC`sNg}xtr|j|jjd|j|jjd|jj}|tkr`Pq |dddkr|dd |d|j jit dd 6|d 6n|j |_ tS(Nu]u>iiu]]uuiu ParseErrorutypeuinvalid-codepointudatau�u Characters(R5R&RRIR%R R(tcounttrangeR R R|RR(RRJR%t nullCountRw((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs.    
N(Nt__name__t __module__t__doc__RRR$R4RRBRCRFRRGRNRLRPRRRSRHRURVRMR[R\ROR_R`RQRaRcRbRdRhRfReRgRiRkRjRlRmRoRnRpRXRqRsRrRxRzRyR{RYRWRTR}RRRRRR~RRRRRRRRRRRRRRRR(((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyRs    HP          #                  6 "       -          3            N(t __future__RRRtpip._vendor.sixRR*t collectionsRt constantsRRRRR R R R R Rt _inputstreamRt_trieRR6tobjectR(((sC/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyts PK.e[ ]j]jhtml5lib/_inputstream.pyonu[ abc!@`sddlmZmZmZddlmZmZddlmZm Z ddl Z ddl Z ddl m Z ddlmZmZmZmZddlmZdd lmZdd lmZydd lmZWnek reZnXegeD]Zejd ^qZegeD]Zejd ^q"ZegeD]Zejd ^qJZeed dgBZ dZ!ej"re j#e!d e$ddZ%ne j#e!Z%e&dddddddddddddd d!d"d#d$d%d&d'd(d)d*d+d,d-d.d/d0d1d2g Z'e j#d3Z(iZ)d4e*fd5YZ+d6Z,d7e*fd8YZ-d9e-fd:YZ.d;e/fd<YZ0d=e*fd>YZ1d?e*fd@YZ2dAZ3dS(Bi(tabsolute_importtdivisiontunicode_literals(t text_typet binary_type(t http_clientturllibN(t webencodingsi(tEOFtspaceCharacterst asciiLetterstasciiUppercase(tReparseException(t_utils(tStringIO(tBytesIOuasciit>tt|j||krL|t|j|8}|d7}qW||g|_dS(Nii(RRR(RRtoffsetti((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytseekLs cC`sp|js|j|S|jdt|jkr_|jdt|jdkr_|j|S|j|SdS(Niii(Rt _readStreamRRt_readFromBuffer(Rtbytes((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytreadUs     cC`s&tg|jD]}t|^q S(N(tsumRR(Rtitem((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt_bufferedBytes^scC`sL|jj|}|jj||jdcd7 Normalized stream from source for use by html5lib. source can be either a file-object, local filename or a string. The optional encoding parameter must be a string that indicates the encoding. 
If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) u􏿿iiuutf-8ucertainN( R tsupports_lone_surrogatestNonetreportCharacterErrorsRtcharacterErrorsUCS4tcharacterErrorsUCS2tnewLinestlookupEncodingt charEncodingt openStreamt dataStreamtreset(RR>((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs    cC`sCd|_d|_d|_g|_d|_d|_d|_dS(Nui(Rt chunkSizet chunkOffsetterrorst prevNumLinest prevNumColsREt_bufferedCharacter(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRNs      cC`s(t|dr|}n t|}|S(uvProduces a file object from source. source can be either a file object, local filename or a string. uread(R9R(RR>R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRLs  cC`st|j}|jdd|}|j|}|jdd|}|dkr\|j|}n||d}||fS(Nu iii(RtcountRRtrfindRS(RRRtnLinest positionLinet lastLinePostpositionColumn((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt _positions   cC`s&|j|j\}}|d|fS(u:Returns (line, col) of the current position in the stream.i(R[RP(Rtlinetcol((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRscC`sL|j|jkr%|js%tSn|j}|j|}|d|_|S(uo Read one character from the stream or queue if available. Return EOF when EOF is reached. 
i(RPROt readChunkRR(RRPtchar((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyR_s    cC`sO|dkr|j}n|j|j\|_|_d|_d|_d|_|jj |}|j r|j |}d|_ n |st St |dkrt |d}|dksd|kodknr|d|_ |d }qn|jr|j|n|jdd }|jd d }||_t ||_tS( Nuiiii iiu u u (REt_defaultChunkSizeR[RORRRSRRPRMR"RTR8RtordRFtreplacetTrue(RROR'tlastv((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyR^s0        (   cC`s:x3tttj|D]}|jjdqWdS(Nuinvalid-codepoint(trangeRtinvalid_unicode_retfindallRQR&(RR't_((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRG%s"cC`st}xtj|D]}|r(qnt|j}|j}tj|||d!rtj|||d!}|t kr|j j dnt }q|dkr|dkr|t |dkr|j j dqt}|j j dqWdS(Niuinvalid-codepointiii(R8RftfinditerRatgrouptstartR tisSurrogatePairtsurrogatePairToCodepointtnon_bmp_invalid_codepointsRQR&RcR(RR'tskiptmatcht codepointRtchar_val((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRH)s    c C`sTyt||f}Wnqtk rdjg|D]}dt|^q1}|scd|}ntjd|}t||f Normalized stream from source for use by html5lib. source can be either a file-object, local filename or a string. The optional encoding parameter must be a string that indicates the encoding. If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) iidN(RLt rawStreamR<Rt numBytesMetatnumBytesChardettoverride_encodingttransport_encodingtsame_origin_parent_encodingtlikely_encodingtdefault_encodingtdetermineEncodingRKRN(RR>RRRRRt useChardet((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs       cC`s3|jdjj|jd|_tj|dS(Niureplace(RKt codec_infot streamreaderRRMR<RN(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRNs"cC`sUt|dr|}n t|}y|j|jWnt|}nX|S(uvProduces a file object from source. source can be either a file object, local filename or a string. 
uread(R9RRRR(RR>R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRLs  cC`s |jdf}|ddk r&|St|jdf}|ddk rO|St|jdf}|ddk rx|S|jdf}|ddk r|St|jdf}|ddk r|djjd r|St|j df}|ddk r|S|ryddl m }Wnt k r4qXg}|}xF|j s|jj|j}|soPn|j||j|qGW|jt|jd}|jjd|dk r|dfSnt|jdf}|ddk r|StddfS(Nucertainiu tentativeuutf-16(tUniversalDetectoruencodingu windows-1252(t detectBOMRERJRRtdetectEncodingMetaRtnamet startswithRtchardet.universaldetectorRt ImportErrortdoneRR"RR&tfeedtclosetresultRR(RtchardetRKRtbufferstdetectorRtencoding((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRsP'       cC`st|}|dkrdS|jdkr:td}nr||jdkrf|jddf|_nF|jjd|df|_|jtd|jd|fdS(Nuutf-16beuutf-16leuutf-8iucertainuEncoding changed from %s to %s(uutf-16beuutf-16le(RJRERRKRRRNR (Rt newEncoding((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytchangeEncodings    cC`sidtj6dtj6dtj6dtj6dtj6}|jjd}|j|d }d}|s|j|}d}|s|j|d }d}qn|r|jj |t |S|jj d d Sd S( uAttempts to detect at BOM at the start of the stream. 
If an encoding can be determined from the BOM return the name of the encoding otherwise return Noneuutf-8uutf-16leuutf-16beuutf-32leuutf-32beiiiiN( tcodecstBOM_UTF8t BOM_UTF16_LEt BOM_UTF16_BEt BOM_UTF32_LEt BOM_UTF32_BERR"tgetRRJRE(RtbomDicttstringRR((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs$   cC`sk|jj|j}t|}|jjd|j}|dk rg|jdkrgtd}n|S(u9Report the encoding declared by the meta element iuutf-16beuutf-16leuutf-8N(uutf-16beuutf-16le( RR"RtEncodingParserRt getEncodingRERRJ(RRtparserR((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyR9s  N( R0R1R2RERcRRNRLRRRR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyR=s(   >  "t EncodingBytescB`seZdZdZdZdZdZdZdZdZ dZ e e e Z d Z e e Zed Zd Zd Zd ZRS(uString-like object with an associated position and various extra methods If the position is ever greater than the string length then an exception is raisedcC`stj||jS(N(R!t__new__tlower(Rtvalue((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRLscC`s d|_dS(Ni(R[(RR((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRPscC`s|S(N((R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt__iter__TscC`sS|jd}|_|t|kr/tn|dkrDtn|||d!S(Nii(R[Rt StopIterationR;(Rtp((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt__next__Ws    cC`s |jS(N(R(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytnext_scC`sY|j}|t|kr$tn|dkr9tn|d|_}|||d!S(Nii(R[RRR;(RR((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytpreviouscs    cC`s+|jt|krtn||_dS(N(R[RR(RR((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt setPositionls cC`s<|jt|krtn|jdkr4|jSdSdS(Ni(R[RRRE(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt getPositionqs  
cC`s||j|jd!S(Ni(R(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytgetCurrentByte{scC`sc|j}xJ|t|krU|||d!}||krH||_|S|d7}q W||_dS(uSkip past a list of charactersiN(RRR[RE(RRzRR{((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRos    cC`sc|j}xJ|t|krU|||d!}||krH||_|S|d7}q W||_dS(Ni(RRR[RE(RRzRR{((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt skipUntils    cC`sQ|j}|||t|!}|j|}|rM|jt|7_n|S(uLook for a sequence of bytes at the start of a string. If the bytes are found return True and advance the position to the byte after the match. Otherwise return False and leave the position alone(RRR(RR!RR'R+((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyt matchBytess  cC`sh||jj|}|dkr^|jdkr=d|_n|j|t|d7_tStdS(uLook for the next sequence of bytes matching a given sequence. If a match is found advance the position to the last byte of the matchiiiN(RtfindR[RRcR(RR!t newPosition((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytjumpTos  (R0R1R2RRRRRRRRtpropertyRRt currentBytetspaceCharactersBytesRoRRR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRHs           RcB`s_eZdZdZdZdZdZdZdZdZ dZ d Z RS( u?Mini parser for detecting character encoding from meta elementscC`st||_d|_dS(u3string - the data to work on for encoding detectionN(RR'RER(RR'((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRscC`sd|jfd|jfd|jfd|jfd|jfd|jff}xv|jD]k}t}xR|D]J\}}|jj|rky|}PWqtk rt }PqXqkqkW|sXPqXqXW|j S(Ns(R'R(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRscC`sK|jjtkrtSt}d}x"trF|j}|dkrGtS|ddkr|ddk}|rC|dk rC||_tSq%|ddkr|d}t|}|dk rC||_tSq%|ddkr%t t |d}|j }|dk rCt|}|dk r@|r4||_tS|}q@qCq%q%WdS(Nis http-equivis content-typetcharsettcontent( R'RRRcR8REt getAttributeRRJtContentAttrParserRtparse(Rt hasPragmatpendingEncodingtattrttentativeEncodingtcodect 
contentParser((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs:             cC`s |jtS(N(thandlePossibleTagR8(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRscC`st|j|jtS(N(RR'RRc(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs cC`s|j}|jtkr9|r5|j|jntS|jt}|dkra|jn+|j}x|dk r|j}qpWtS(NR( R'RtasciiLettersBytesRRRcRtspacesAngleBracketsRRE(RtendTagR'R{R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs      cC`s|jjdS(NR(R'R(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRscC`s|j}|jttdgB}|dkr5dSg}g}xtr|dkr`|r`Pnz|tkr||j}Pn^|d krdj|dfS|tkr|j|j n|dkrdS|j|t |}qDW|dkr|j dj|dfSt ||j}|d kr|}xtrt |}||krt |dj|dj|fS|tkr|j|j q>|j|q>Wn^|dkrdj|dfS|tkr|j|j n|dkr dS|j|x}trt |}|t krSdj|dj|fS|tkru|j|j q|dkrdS|j|qWdS( u_Return a name,value pair for the next attribute in the stream, if one is found, or Nonet/Rt=R(t't"N(RN(RR(RR( R'RoRt frozensetRERcR)tasciiUppercaseBytesR&RRRR(RR'R{tattrNamet attrValuet quoteChar((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRsf                            ( R0R1R2RRRRRRRRR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRs    $    RcB`seZdZdZRS(cC`s ||_dS(N(R'(RR'((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRfscC`s:y!|jjd|jjd7_|jj|jjdksHdS|jjd7_|jj|jjdkr|jj}|jjd7_|jj}|jj|r|j||jj!SdSnP|jj}y(|jjt|j||jj!SWntk r|j|SXWntk r5dSXdS(NRiRRR(RR( R'RRRoRRERRR(Rt quoteMarkt oldPosition((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRjs.       
(R0R1RR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRes cC`swt|tr:y|jd}Wq:tk r6dSXn|dk roytj|SWqstk rkdSXndSdS(u{Return the python codec name corresponding to an encoding or None if the string doesn't correspond to a valid encoding.uasciiN(R3RtdecodetUnicodeDecodeErrorRERtlookuptAttributeError(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyRJs   (4t __future__RRRtpip._vendor.sixRRtpip._vendor.six.movesRRRRut pip._vendorRt constantsRR R R R R(R tioRRRRR$tencodeRRRRtinvalid_unicode_no_surrogateRDRvtevalRftsetRntascii_punctuation_reRstobjectRRCR<R=R!RRRRJ(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pytsP  "  (((  J h'PK.e[{hDDhtml5lib/constants.pyonu[ abcP@`sNddlmZmZmZddlZdZidd6dd6dd6d d 6d d 6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d!d"6d#d$6d%d&6d'd(6d)d*6d+d,6d-d.6d/d06d1d26d3d46d5d66d7d86d9d:6d;d<6d=d>6d?d@6dAdB6dCdD6dEdF6dGdH6dIdJ6dKdL6dMdN6dOdP6dQdR6dSdT6dUdV6dWdX6dYdZ6d[d\6dUd]6dUd^6d_d`6dadb6dcdd6dedf6dgdh6didj6dkdl6dmdn6dodp6dqdr6dsdt6dudv6dwdx6dydz6d{d|6d}d~6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6Zidd6dd6d d 6d d 6d d6dd6Ze eddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfed dfed dfed d fgZ e edd!fedd"fedd#fedd$fedd%fedd&fedd'fedd(fedd)fedd*fedd+fedd,fedd-fedd.fgZ e edd/feddfedd0fedd1fedd2fedd3fedd4fedd5fedd6fedd7fedd8fedd9feddfedd:fedd;fedd<fedd=fedd>fedd?fedd@feddAfeddBfeddCfeddDfeddEfeddFfeddGfeddHfeddIfeddJfeddKfeddLfeddMfeddNfeddOfeddPfeddQfeddRfeddSfeddfeddTfeddUfeddVfeddWfeddXfeddYfeddZfedd[feddfedd\fedd]fedd^fedd_fedd`feddafeddfeddbfeddcfedddfeddefeddffeddgfeddhfeddifeddjfeddfeddkfeddfeddlfeddmfeddfeddnfedd feddofeddpfeddqfeddrfed dfgNZ e eddsfed dfed dfed d fgZ e 
eddfeddfeddfeddfeddfgZi>dtdu6dvdw6dxdy6dzd{6d|d}6d~d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6Zidd6Zi d ded fd6d ded fd6d ded fd6d ded fd6d ded fd6d d ed fd6d ded fd6dd3edfd6ddedfd6ddedfd6ddedfd6dd edfd6ZegejD]'\Z\ZZZeefef^q Ze ddddd gZe ddkdmdndogZe ejZe ejZe ejZ e ej!Z!e ej"Z#egejD]$Z$e%e$e%e$j&f^q Z'dZ(e d3d=d dZd]dSd8dVdDddd0d;dWd d gZ)e d dlgZ*e djdgdrdTd_d`dagZ+ie d gd6e dgdj6e dgdV6e ddgd6e ddgd6e ddgdg6e dgd?6e ddgd6e ddddgd=6e dgdS6e dgd\6e dd gdE6e dd d!gd"6e dd gd#6e dd$gd96e dd d%d$ddgdW6e dd d$dgdi6e dd gd&6Z,dZ-e dCdDdEdFdGgZ.idHdI6dHdJ6dKdL6dKdM6dNdO6dNdP6dQdR6dSdT6dSdU6dVdW6dXdY6dZd[6dZd\6d]d^6d_d`6dadb6dcdd6dedf6dgdh6didj6didk6dldm6dndo6dpdq6dpdr6dsdt6dsdu6dvdw6dxdy6dzd{6d|d}6d~d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d d 6d d 6dd6dd6dd6dd6dd6dd6dd6dd6dd6d d!6d"d#6d$d%6d&d'6d(d)6d*d+6d,d-6d.d/6d0d16d2d36dd46d5d66d7d86d9d:6d;d<6d;d=6d>d?6d>d@6dAdB6dCdD6dCdE6dFdG6dHdI6dJdK6dLdM6dLdN6dOdP6dQdR6dSdT6dUdV6dWdX6dYdZ6d[d\6d]d^6d_d`6dadb6dcdd6dedf6dgdh6didj6didk6dldm6dndo6dpdq6drds6dtdu6dvdw6dxdy6dzd{6d|d}6d|d~6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d d 6d d 6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d!d"6d#d$6d%d&6d'd(6d)d*6d+d,6d-d.6d/d06d1d26d3d46d5d66d7d86d9d:6d;d<6d=d>6d?d@6dAdB6dCdD6dEdF6dGdH6dIdJ6dKdL6dMdN6dOdP6dQdR6dSdT6dUdV6ddW6ddX6dYdZ6d[d\6d]d^6d_d`6dadb6dcdd6dedf6dgdh6didj6dkdl6dmdn6dodp6dqdr6d ds6d 
dt6ddu6dvdw6dxdy6dzd{6dd|6d}d~6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d d 6d d 6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6dd!6d"d#6d"d$6d%d&6d'd(6d)d*6d+d,6d+d-6d.d/6d0d16d2d36d4d56d6d76d8d96d:d;6d<d=6d>d?6d>d@6dAdB6dAdC6dDdE6dFdG6dFdH6dIdJ6dKdL6dMdN6dOdP6dQdR6dSdT6dUdV6dWdX6dYdZ6d[d\6dd]6d^d_6d`da6dbdc6ddde6dfdg6dhdi6djdk6dldm6ddn6dodp6dqdr6dsdt6dudv6dudw6dxdy6dzd{6d|d}6d~d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d)d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6dd 6d d 6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6dd!6d"d#6d$d%6d&d'6dd(6d)d*6d+d,6d-d.6d/d06d1d26d3d46d5d66d7d86d9d:6d;d<6d=d>6d?d@6dAdB6dCdD6dEdF6dGdH6dIdJ6dKdL6dKdM6dNdO6dPdQ6dRdS6dTdU6dVdW6dVdX6dYdZ6d[d\6d]d^6d_d`6d_da6dbdc6ddde6dfdg6dhdi6djdk6dldm6dndo6dpdq6drds6ddt6dudv6dwdx6dydz6d{d|6d}d~6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dgd6dd6dd6dd 6d d 6d d 6d d6dd6dd6dKd6dKdE6dd6dd6dd6dd6dd6dd6d d!6dd"6d#d$6d%d&6d'd(6d)d*6d+d,6d-d.6d/d06d1d26d3d46d5d66d7d86d9d:6d;d<6did=6d>d?6d@dA6dBdC6dAdD6dEdF6dGdH6dIdJ6dKdL6dMdF6dAdN6dIdO6dPdQ6dPdR6dSdT6dUdV6dAdW6ddX6dYdZ6dYd[6d\d]6d\d^6dd_6d`da6dbdc6ddde6dfdg6dhdi6djdk6dldm6dndo6dpdq6dpdr6dhds6dtdu6dddv6dwdx6dydz6d~d{6d~d|6d}d~6dfd6dd6dd6dd6dd6dd6dd6dd6dld6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dvd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d}d6d}d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d d 6d d 6d d6dd6dd6dd6dd6dd6dhd6dd6dd6dd6dd6d 
d!6djd"6dld#6d$d%6d&d'6d(d)6d*d+6d*d,6dd-6d.d/6dd06dd16d2d36d4d56d6d76d8d96d:d;6d<d=6d>d?6d@dA6dBdC6ddD6dEdF6dGdH6dIdJ6dIdK6dLdM6dNdO6dPdQ6dRdS6ddT6ddU6dVdW6dXdY6dXdZ6dd[6d\d]6d^d_6d`da6d`db6dcdd6dedf6dgdh6didj6dkdl6dmdn6dodp6ddq6drds6dtdu6dvdw6dxdy6dkdz6d{d|6d}d~6dd6dd6dd6dd6dnd6dnd6dd6dd6dd6dd6dd6dd6d?d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d?d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d5d6dd 6dd 6dd 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dd 6dd 6d d 6d d! 6d" d# 6d$ d% 6dzd& 6dd' 6dd( 6d5d) 6dd* 6d~d+ 6d, d- 6d. d/ 6d0 d1 6d2 d3 6d4 d5 6d6 d7 6d8 d9 6d: d; 6dd< 6dd= 6dd> 6d? d@ 6dA dB 6dC dD 6ddE 6d dF 6dG dH 6dG dI 6dJ dK 6dL dM 6dN dO 6dP dQ 6dP dR 6dS dT 6dU dV 6dW dX 6dndY 6dZ d[ 6d\ d] 6d^ d_ 6d` da 6d` db 6dc dd 6de df 6dg dh 6di dj 6dk dl 6dm dn 6do dp 6dq dr 6ds dt 6ds du 6ds dv 6dw dx 6dy dz 6d{ d| 6d} d~ 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dN d 6dS d 6d_d 6dc d 6dm d 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6d d 6dd 6d_d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dld 6dcd 6dnd 6dZ d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dzd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dd 6dd 6dd 6dd 6dd 6dd 6d d 6d d 6d d 6d d 6d d 6d d! 6d" d# 6dd$ 6dd% 6d& d' 6d( d) 6dd* 6d+ d, 6d- d. 6d/ d0 6d1 d2 6d3 d4 6d3 d5 6d6 d7 6d6 d8 6d1 d9 6d: d; 6d< d= 6dd> 6d? 
d@ 6ddA 6dB dC 6dD dE 6ddF 6ddD6dG dH 6dI dJ 6dK dL 6dM dN 6dO dP 6d dQ 6dR dS 6dK dT 6ddU 6d dV 6ddW 6ddX 6dY dZ 6dY d[ 6dd\ 6dd] 6d d^ 6dd_ 6d` da 6d;db 6dc dd 6de df 6dg dh 6di dj 6dk dl 6dk dm 6dn do 6dp dq 6dr ds 6dt du 6dv dw 6dx dy 6dz d{ 6d| d} 6d~ d 6d d 6d d 6d d 6dg d 6d d 6d d 6dd 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6dd 6d d 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6dd 6dd 6dd 6d d 6d d 6d d 6dOd 6d d 6d d 6d d 6d d 6dd 6d d 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dOd 6d d 6d d 6d d 6d d 6dOd 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6did 6dd 6d d 6d d 6d[d 6d d 6d d 6d d 6d d 6dd 6d d 6d'd 6d d 6d'd 6d! d" 6d# d$ 6d# d% 6d)d& 6d+d' 6d( d) 6d* d+ 6d| d, 6d- d. 6d/ d0 6d1 d2 6d3 d4 6d5 d6 6d7 d8 6d9 d: 6d; d< 6d= d> 6d? d@ 6dA dB 6dC dD 6dE dF 6dG dH 6dI dJ 6dK dL 6dM dN 6d/dO 6dA dP 6dQ dR 6dS dT 6d6dU 6dydV 6dW dX 6dY dZ 6d[ d\ 6d] d^ 6d)d_ 6d3 d` 6d&da 6dSdb 6dc dd 6d;de 6d-df 6ddg 6de dh 6di dj 6dYdk 6d] dl 6d[dm 6dadn 6dado 6dp dq 6dr ds 6dt du 6dv dw 6dx dy 6dz d{ 6d! d| 6d} d~ 6dYd 6d d 6d]d 6dcd 6d d 6d9d 6d d 6d]d 6d d 6d&d 6dSd 6d d 6d d 6d d 6dd 6dc d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d1d 6dmd 6dod 6d d 6dqd 6d- d 6d d 6d d 6d d 6d d 6d d 6d d 6ddd 6d d 6d d 6dd 6d d 6d d 6d-d 6d, d 6dd 6d d 6d d 6d d 6d d 6d d 6d}d 6dcd 6d d 6d d 6dC d 6d8d 6d d 6d d 6dd 6ddC6d d 6d d 6d} d 6di d 6d d 6d d 6d d 6d d 6d d 6dId 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dd 6dd 6d2d 6dAd 6dd 6d d 6d d 6d d 6d d 6d#d 6d d 6d d 6d d 6d d 6dd 6dUd 6d d 6dd 6dd! 6d" d# 6dd$ 6d d% 6d& d' 6d( d) 6dn d* 6dd+ 6d, d- 6d. d/ 6dd0 6d1 d2 6dd3 6d4 d5 6d6 d7 6d6 d8 6d9 d: 6d; d< 6dd= 6d> d? 
6d@ dA 6dB dC 6dD dE 6ddF 6dG dH 6dI dJ 6dK dL 6ddM 6dN dO 6dP dQ 6ddR 6dS dT 6dU dV 6dW dX 6ddY 6dZ d[ 6dZ d\ 6dd] 6dd^ 6dd_ 6dd` 6dda 6db dc 6dd de 6df dg 6ddh 6di dj 6dk dl 6dm dn 6do dp 6ddq 6dr ds 6dt du 6ddv 6ddw 6dx dy 6ddz 6d{ d| 6dd} 6dd~ 6dd 6d d 6dd 6dd 6dd 6dd 6dd 6dd 6dd 6dd 6dd 6d@ d 6d d 6d d 6dd 6d d 6d d 6dd 6d d 6d> d 6d d 6d d 6d d 6dd 6d d 6d d 6dd 6d d 6dd 6dd 6dd 6dd 6dd 6dd 6dd 6dd 6d d 6d d 6d d 6dd 6d d 6d d 6dd 6d d 6d d 6dd 6dd 6d d 6d d 6dd 6dd 6d d 6d d 6d d 6dd 6dd 6dd 6dd 6dd 6dG d 6d d 6d d 6d d 6d d 6dd 6dd 6dd 6dd 6dd 6d d 6dd 6dd 6d d 6dd 6dd 6dd 6dd 6dd 6dd 6d d 6d d 6dd 6dd 6dd 6d d 6dd 6dd 6d d 6d d 6d d 6dd 6d d 6d d 6dd 6d d 6d d 6dd 6dd 6dd 6dd 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d! d" 6d# d$ 6d% d& 6d' d( 6dd) 6dd* 6d+ d, 6drd- 6d. d/ 6d. d0 6dtd1 6dvd2 6d3 d4 6d3 d5 6d6 d7 6dxd8 6d9 d: 6d; d< 6dd= 6d> d? 6d@ dA 6dB dC 6dD dE 6dF dG 6dH dI 6dH dJ 6dK dL 6dM dN 6d0dO 6ddP 6dmdQ 6dR dS 6dT dU 6dIdV 6dW dX 6dY dZ 6d[ d\ 6d] d^ 6d_ d` 6dda 6db dc 6dd de 6df dg 6ddh 6di dj 6dodk 6dl dm 6dn do 6dn dp 6dq dr 6dq ds 6dt du 6dt dv 6dw dx 6dy dz 6d{ d| 6d} d~ 6dn d 6d d 6d d 6d d 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6dd 6d d 6d d 6dd 6d d 6d d 6dQd 6d d 6d d 6d d 6d d 6d}d 6d d 6d d 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6dg d 6d d 6dg d 6d d 6d d 6dd 6d d 6d" d 6d d 6d d 6d[d 6d[d 6d d 6d d 6d[d 6d d 6d d 6d d 6d d 6dbd 6d d 6d d 6dfd 6ddd 6dbd 6d d 6dfd 6ddd 6d d 6d d 6d d 6dhd 6d d 6d^d 6d d 6d d 6d d 6dld 6d d 6d d 6d d 6dod 6dod 6dhd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dd6dd6dd6dd6dd6d d 6dud 6dudG6dd 6dd 6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6dd!6dd"6d#d$6dd%6d&d'6d(d)6d*d+6d~ d,6d d-6d.d/6d0d16d2d36d4d56d6d76d8d96dzd:6dd;6d<d=6d>d?6d@dA6dBdC6dDdE6dFdG6dHdI6dJdK6ddL6d>dM6dNdO6dPdQ6dRdS6ddT6ddU6dVdW6ddX6ddY6ddZ6dd[6d\d]6dd^6dd_6d`da6ddb6dcdd6d,de6ddf6dgdh6didj6dkdl6ddm6d2dn6d,do6ddp6ddq6dadr6dsdt6d4du6dvdw6dxdy6d 
dz6dd{6dad|6d}d~6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dsd6dd6dd6dd6d@d6dd6dd6dvd6dd6dd6dd6dd6dd6dd6dd6dd6d d6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d$ d6dd6dd6dt d6dzd6dzd6dd6dd6dd6dd6dvd6dvd6dd6dd6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d;d6dd6d=d6d=d6dd6dd6dd6dd6dd6dd6dd6d)d6dvd6dd6dd6dd 6d d 6d d 6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d!d"6dd#6d$d%6dd&6dd'6dd(6dd)6dd*6dd+6dd,6dd-6dd.6dd/6dvd06dvd16dd26d3d46dvd56d d66dd76d8d96dd:6d d;6d d<6d d=6d>d?6d@dA6dBdC6d dD6dEdF6dGdH6dIdJ6dKdL6dMdN6dOdP6d>dQ6d dR6d@dS6dKdT6dIdU6dVdW6dXdY6dZd[6d d\6dd]6dd^6dd_6dd`6dda6ddb6ddc6ddd6dedf6dgdh6dgdi6djdk6djdl6dmdn6dmdo6ddp6dqdr6dsdt6dudv6ddw6dxdy6dzd{6d|d}6d~d6dd6dd6dd6dd6dd6dd6dqd6dd6dd6dd6dd6dd6dd6dv d6dxd6dxd6dd6dd6dd6dd6dd6dMd6dd6dd6dd6dEd6dd6dd6d3d6d3d6dd6dd6dd6dAd6d;d6d9d6dAd6d;d6dd6dd6dd6dd6dd6dd6dd6dd6d d6d{ d6d0d6dd6dd6dd6dd6dd6dd6d"d6dd6d: d6d d6dId6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dwd6dd6d{d6d d 6d d 6d d6d d6dOd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d!d"6dd#6dyd$6dOd%6dd&6dnd'6d(d)6dd*6d(d+6d,d-6d.d/6d.d06d1d26d3d46d5d66d7d86d9d:6d;d<6dd=6dd>6d,d?6d@dA6d@dB6dCdD6ddE6dFdG6dHdI6ddJ6dKdL6d dM6d dN6ds dO6d dP6d dQ6dodR6dydS6dkdT6ddU6dVdW6dXdY6dZd[6d\d]6dd^6dEd_6dd`6dadb6ddc6di dd6dedf6dgdh6didj6ddk6ddl6dmdn6dEdo6ddp6ddq6drds6dodt6ddu6dvdw6dXdx6dVdy6d\dz6dZd{6d|d}6d~d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dld6dd6dd6d d6dod6dd6d d6dmd6d d6dd6dd6dd6dd6dd6dd6dqd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6Z/i"dd6d d6d d6dd6d d6d d6dyd6dn d6dd6dd6did6d d6dd6d d6dd6dd6dd6dd6dd6d8d6dd6d6d6dd6d*d6do d6d d6dd6d"d6dd6dd6d@ d6dd6dd6dd6Z0idd6dd6d d 6d d 6d d6dd6dd6dd6Z1e e1d e1de1dgZ2egejD]\Z3Z4e4e3f^qDNZ5de5d' instead.u'expected-tag-name-but-got-right-bracketuSExpected tag name. Got '?' instead. (HTML doesn't support processing instructions.)u'expected-tag-name-but-got-question-marku-Expected tag name. Got something else insteaduexpected-tag-nameu6Expected closing tag. Got '>' instead. 
Ignoring ''.u*expected-closing-tag-but-got-right-bracketu-Expected closing tag. Unexpected end of file.u expected-closing-tag-but-got-eofu<Expected closing tag. Unexpected character '%(data)s' found.u!expected-closing-tag-but-got-charu'Unexpected end of file in the tag name.ueof-in-tag-nameu8Unexpected end of file. Expected attribute name instead.u#expected-attribute-name-but-got-eofu)Unexpected end of file in attribute name.ueof-in-attribute-nameu#Invalid character in attribute nameu#invalid-character-in-attribute-nameu#Dropped duplicate attribute on tag.uduplicate-attributeu1Unexpected end of file. Expected = or end of tag.u$expected-end-of-tag-name-but-got-eofu1Unexpected end of file. Expected attribute value.u$expected-attribute-value-but-got-eofu*Expected attribute value. Got '>' instead.u.expected-attribute-value-but-got-right-bracketu"Unexpected = in unquoted attributeu"equals-in-unquoted-attribute-valueu*Unexpected character in unquoted attributeu0unexpected-character-in-unquoted-attribute-valueu*Unexpected character after attribute name.u&invalid-character-after-attribute-nameu+Unexpected character after attribute value.u*unexpected-character-after-attribute-valueu.Unexpected end of file in attribute value (").u#eof-in-attribute-value-double-quoteu.Unexpected end of file in attribute value (').u#eof-in-attribute-value-single-quoteu*Unexpected end of file in attribute value.u eof-in-attribute-value-no-quotesu)Unexpected end of file in tag. Expected >u#unexpected-EOF-after-solidus-in-tagu/Unexpected character after / in tag. Expected >u)unexpected-character-after-solidus-in-tagu&Expected '--' or 'DOCTYPE'. Not found.uexpected-dashes-or-doctypeu Unexpected ! 
after -- in commentu,unexpected-bang-after-double-dash-in-commentu$Unexpected space after -- in commentu-unexpected-space-after-double-dash-in-commentuIncorrect comment.uincorrect-commentu"Unexpected end of file in comment.ueof-in-commentu%Unexpected end of file in comment (-)ueof-in-comment-end-dashu+Unexpected '-' after '--' found in comment.u,unexpected-dash-after-double-dash-in-commentu'Unexpected end of file in comment (--).ueof-in-comment-double-dashueof-in-comment-end-space-stateueof-in-comment-end-bang-stateu&Unexpected character in comment found.uunexpected-char-in-commentu(No space after literal string 'DOCTYPE'.uneed-space-after-doctypeu.Unexpected > character. Expected DOCTYPE name.u+expected-doctype-name-but-got-right-bracketu.Unexpected end of file. Expected DOCTYPE name.u!expected-doctype-name-but-got-eofu'Unexpected end of file in DOCTYPE name.ueof-in-doctype-nameu"Unexpected end of file in DOCTYPE.ueof-in-doctypeu%Expected space or '>'. Got '%(data)s'u*expected-space-or-right-bracket-in-doctypeuUnexpected end of DOCTYPE.uunexpected-end-of-doctypeu Unexpected character in DOCTYPE.uunexpected-char-in-doctypeuXXX innerHTML EOFueof-in-innerhtmluUnexpected DOCTYPE. Ignored.uunexpected-doctypeu%html needs to be the first start tag.u non-html-rootu)Unexpected End of file. Expected DOCTYPE.uexpected-doctype-but-got-eofuErroneous DOCTYPE.uunknown-doctypeu2Unexpected non-space characters. Expected DOCTYPE.uexpected-doctype-but-got-charsu2Unexpected start tag (%(name)s). Expected DOCTYPE.u"expected-doctype-but-got-start-tagu0Unexpected end tag (%(name)s). Expected DOCTYPE.u expected-doctype-but-got-end-tagu?Unexpected end tag (%(name)s) after the (implied) root element.uend-tag-after-implied-rootu4Unexpected end of file. Expected end tag (%(name)s).u&expected-named-closing-tag-but-got-eofu4Unexpected start tag head in existing head. Ignored.u!two-heads-are-not-better-than-oneu'Unexpected end tag (%(name)s). 
Ignored.uunexpected-end-tagu;Unexpected start tag (%(name)s) that can be in head. Moved.u#unexpected-start-tag-out-of-my-headu Unexpected start tag (%(name)s).uunexpected-start-taguMissing end tag (%(name)s).umissing-end-taguMissing end tags (%(name)s).umissing-end-tagsuCUnexpected start tag (%(startName)s) implies end tag (%(endName)s).u$unexpected-start-tag-implies-end-tagu@Unexpected start tag (%(originalName)s). Treated as %(newName)s.uunexpected-start-tag-treated-asu,Unexpected start tag %(name)s. Don't use it!udeprecated-tagu'Unexpected start tag %(name)s. Ignored.uunexpected-start-tag-ignoreduEUnexpected end tag (%(gotName)s). Missing end tag (%(expectedName)s).u$expected-one-end-tag-but-got-anotheru:End tag (%(name)s) seen too early. Expected other end tag.uend-tag-too-earlyuFUnexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).uend-tag-too-early-namedu+End tag (%(name)s) seen too early. Ignored.uend-tag-too-early-ignoreduQEnd tag (%(name)s) violates step 1, paragraph 1 of the adoption agency algorithm.uadoption-agency-1.1uQEnd tag (%(name)s) violates step 1, paragraph 2 of the adoption agency algorithm.uadoption-agency-1.2uQEnd tag (%(name)s) violates step 1, paragraph 3 of the adoption agency algorithm.uadoption-agency-1.3uQEnd tag (%(name)s) violates step 4, paragraph 4 of the adoption agency algorithm.uadoption-agency-4.4u>Unexpected end tag (%(originalName)s). 
Treated as %(newName)s.uunexpected-end-tag-treated-asu'This element (%(name)s) has no end tag.u no-end-tagu9Unexpected implied end tag (%(name)s) in the table phase.u#unexpected-implied-end-tag-in-tableu>Unexpected implied end tag (%(name)s) in the table body phase.u(unexpected-implied-end-tag-in-table-bodyuDUnexpected non-space characters in table context caused voodoo mode.u$unexpected-char-implies-table-voodoou3Unexpected input with type hidden in table context.u unexpected-hidden-input-in-tableu!Unexpected form in table context.uunexpected-form-in-tableuDUnexpected start tag (%(name)s) in table context caused voodoo mode.u)unexpected-start-tag-implies-table-voodoouBUnexpected end tag (%(name)s) in table context caused voodoo mode.u'unexpected-end-tag-implies-table-voodoouCUnexpected table cell start tag (%(name)s) in the table body phase.uunexpected-cell-in-table-bodyuFGot table cell end tag (%(name)s) while required end tags are missing.uunexpected-cell-end-tagu?Unexpected end tag (%(name)s) in the table body phase. Ignored.u unexpected-end-tag-in-table-bodyu=Unexpected implied end tag (%(name)s) in the table row phase.u'unexpected-implied-end-tag-in-table-rowu>Unexpected end tag (%(name)s) in the table row phase. Ignored.uunexpected-end-tag-in-table-rowuJUnexpected select start tag in the select phase treated as select end tag.uunexpected-select-in-selectu/Unexpected input start tag in the select phase.uunexpected-input-in-selectuBUnexpected start tag token (%(name)s in the select phase. Ignored.uunexpected-start-tag-in-selectu;Unexpected end tag (%(name)s) in the select phase. 
Ignored.uunexpected-end-tag-in-selectuKUnexpected table element start tag (%(name)s) in the select in table phase.u5unexpected-table-element-start-tag-in-select-in-tableuIUnexpected table element end tag (%(name)s) in the select in table phase.u3unexpected-table-element-end-tag-in-select-in-tableu8Unexpected non-space characters in the after body phase.uunexpected-char-after-bodyu>Unexpected start tag token (%(name)s) in the after body phase.uunexpected-start-tag-after-bodyu<Unexpected end tag token (%(name)s) in the after body phase.uunexpected-end-tag-after-bodyu@Unexpected characters in the frameset phase. Characters ignored.uunexpected-char-in-framesetuEUnexpected start tag token (%(name)s) in the frameset phase. Ignored.u unexpected-start-tag-in-framesetuFUnexpected end tag token (frameset) in the frameset phase (innerHTML).u)unexpected-frameset-in-frameset-innerhtmluCUnexpected end tag token (%(name)s) in the frameset phase. Ignored.uunexpected-end-tag-in-framesetuEUnexpected non-space characters in the after frameset phase. Ignored.uunexpected-char-after-framesetuEUnexpected start tag (%(name)s) in the after frameset phase. Ignored.u#unexpected-start-tag-after-framesetuCUnexpected end tag (%(name)s) in the after frameset phase. Ignored.u!unexpected-end-tag-after-framesetu(Unexpected end tag after body(innerHtml)u'unexpected-end-tag-after-body-innerhtmlu6Unexpected non-space characters. Expected end of file.uexpected-eof-but-got-charu6Unexpected start tag (%(name)s). Expected end of file.uexpected-eof-but-got-start-tagu4Unexpected end tag (%(name)s). Expected end of file.uexpected-eof-but-got-end-tagu/Unexpected end of file. Expected table content.u eof-in-tableu0Unexpected end of file. Expected select content.u eof-in-selectu2Unexpected end of file. Expected frameset content.ueof-in-framesetu0Unexpected end of file. Expected script content.ueof-in-script-in-scriptu0Unexpected end of file. 
Expected foreign contentueof-in-foreign-landsu0Trailing solidus not allowed on element %(name)su&non-void-element-with-trailing-solidusu2Element %(name)s not allowed in a non-html contextu*unexpected-html-element-in-foreign-contentu*Unexpected end tag (%(name)s) before html.uunexpected-end-tag-before-htmlu9Element %(name)s not allowed in a inhead-noscript contextuunexpected-inhead-noscript-tagu8Unexpected end of file. Expected inhead-noscript contentueof-in-head-noscriptu@Unexpected non-space character. Expected inhead-noscript contentuchar-in-head-noscriptu0Undefined error (this sucks and should be fixed)uXXX-undefined-erroruhttp://www.w3.org/1999/xhtmluhtmlu"http://www.w3.org/1998/Math/MathMLumathmluhttp://www.w3.org/2000/svgusvguhttp://www.w3.org/1999/xlinkuxlinku$http://www.w3.org/XML/1998/namespaceuxmluhttp://www.w3.org/2000/xmlns/uxmlnsuappletucaptionumarqueeuobjectutableutduthumiumoumnumsumtextuannotation-xmlu foreignObjectudescutitleuaububigucodeuemufontuiunobrususmallustrikeustronguttuuuaddressuareauarticleuasideubaseubasefontubgsoundu blockquoteubodyubrubuttonucenterucolucolgroupucommanduddudetailsudirudivudludtuembedufieldsetufigureufooteruformuframeuframesetuh1uh2uh3uh4uh5uh6uheaduheaderuhruiframeuimageuimguinputuisindexuliulinkulistingumenuumetaunavunoembedunoframesunoscriptuolupuparamu plaintextupreuscriptusectionuselectustyleutbodyutextareautfootutheadutruuluwbruxmpu annotaion-xmlu attributeNameu attributenameu attributeTypeu attributetypeu baseFrequencyu basefrequencyu baseProfileu baseprofileucalcModeucalcmodeu clipPathUnitsu clippathunitsucontentScriptTypeucontentscripttypeucontentStyleTypeucontentstyletypeudiffuseConstantudiffuseconstantuedgeModeuedgemodeuexternalResourcesRequireduexternalresourcesrequiredu filterResu filterresu filterUnitsu filterunitsuglyphRefuglyphrefugradientTransformugradienttransformu gradientUnitsu gradientunitsu kernelMatrixu kernelmatrixukernelUnitLengthukernelunitlengthu keyPointsu keypointsu keySplinesu 
keysplinesukeyTimesukeytimesu lengthAdjustu lengthadjustulimitingConeAngleulimitingconeangleu markerHeightu markerheightu markerUnitsu markerunitsu markerWidthu markerwidthumaskContentUnitsumaskcontentunitsu maskUnitsu maskunitsu numOctavesu numoctavesu pathLengthu pathlengthupatternContentUnitsupatterncontentunitsupatternTransformupatterntransformu patternUnitsu patternunitsu pointsAtXu pointsatxu pointsAtYu pointsatyu pointsAtZu pointsatzu preserveAlphau preservealphaupreserveAspectRatioupreserveaspectratiouprimitiveUnitsuprimitiveunitsurefXurefxurefYurefyu repeatCountu repeatcountu repeatDuru repeatdururequiredExtensionsurequiredextensionsurequiredFeaturesurequiredfeaturesuspecularConstantuspecularconstantuspecularExponentuspecularexponentu spreadMethodu spreadmethodu startOffsetu startoffsetu stdDeviationu stddeviationu stitchTilesu stitchtilesu surfaceScaleu surfacescaleusystemLanguageusystemlanguageu tableValuesu tablevaluesutargetXutargetxutargetYutargetyu textLengthu textlengthuviewBoxuviewboxu viewTargetu viewtargetuxChannelSelectoruxchannelselectoruyChannelSelectoruychannelselectoru zoomAndPanu zoomandpanu definitionURLu definitionurluactuateu xlink:actuateuarcroleu xlink:arcroleuhrefu xlink:hrefuroleu xlink:roleushowu xlink:showu xlink:titleutypeu xlink:typeuxml:baseulanguxml:languspaceu xml:spaceu xmlns:xlinku u u u u u event-sourceusourceutracku irrelevantuuscopeduismapuautoplayucontrolsuaudiouvideoudeferuasyncuopenumultipleudisabledudatagriduhiddenucheckedudefaultunoshadeu autosubmitureadonlyuselecteduoptionuoptgroupu autofocusurequireduoutputi ii ii i& i i! 
ii0 i`i9 iRi}i i i i i" i i ii"!iai: iSi~ixult;ugt;uamp;uapos;uquot;uÆuAEliguAElig;u&uAMPuAMP;uÁuAacuteuAacute;uĂuAbreve;uÂuAcircuAcirc;uАuAcy;u𝔄uAfr;uÀuAgraveuAgrave;uΑuAlpha;uĀuAmacr;u⩓uAnd;uĄuAogon;u𝔸uAopf;u⁡uApplyFunction;uÅuAringuAring;u𝒜uAscr;u≔uAssign;uÃuAtildeuAtilde;uÄuAumluAuml;u∖u Backslash;u⫧uBarv;u⌆uBarwed;uБuBcy;u∵uBecause;uℬu Bernoullis;uΒuBeta;u𝔅uBfr;u𝔹uBopf;u˘uBreve;uBscr;u≎uBumpeq;uЧuCHcy;u©uCOPYuCOPY;uĆuCacute;u⋒uCap;uⅅuCapitalDifferentialD;uℭuCayleys;uČuCcaron;uÇuCcediluCcedil;uĈuCcirc;u∰uCconint;uĊuCdot;u¸uCedilla;u·u CenterDot;uCfr;uΧuChi;u⊙u CircleDot;u⊖u CircleMinus;u⊕u CirclePlus;u⊗u CircleTimes;u∲uClockwiseContourIntegral;u”uCloseCurlyDoubleQuote;u’uCloseCurlyQuote;u∷uColon;u⩴uColone;u≡u Congruent;u∯uConint;u∮uContourIntegral;uℂuCopf;u∐u Coproduct;u∳u CounterClockwiseContourIntegral;u⨯uCross;u𝒞uCscr;u⋓uCup;u≍uCupCap;uDD;u⤑u DDotrahd;uЂuDJcy;uЅuDScy;uЏuDZcy;u‡uDagger;u↡uDarr;u⫤uDashv;uĎuDcaron;uДuDcy;u∇uDel;uΔuDelta;u𝔇uDfr;u´uDiacriticalAcute;u˙uDiacriticalDot;u˝uDiacriticalDoubleAcute;u`uDiacriticalGrave;u˜uDiacriticalTilde;u⋄uDiamond;uⅆuDifferentialD;u𝔻uDopf;u¨uDot;u⃜uDotDot;u≐u DotEqual;uDoubleContourIntegral;u DoubleDot;u⇓uDoubleDownArrow;u⇐uDoubleLeftArrow;u⇔uDoubleLeftRightArrow;uDoubleLeftTee;u⟸uDoubleLongLeftArrow;u⟺uDoubleLongLeftRightArrow;u⟹uDoubleLongRightArrow;u⇒uDoubleRightArrow;u⊨uDoubleRightTee;u⇑uDoubleUpArrow;u⇕uDoubleUpDownArrow;u∥uDoubleVerticalBar;u↓u DownArrow;u⤓u DownArrowBar;u⇵uDownArrowUpArrow;ȗu DownBreve;u⥐uDownLeftRightVector;u⥞uDownLeftTeeVector;u↽uDownLeftVector;u⥖uDownLeftVectorBar;u⥟uDownRightTeeVector;u⇁uDownRightVector;u⥗uDownRightVectorBar;u⊤uDownTee;u↧u DownTeeArrow;u Downarrow;u𝒟uDscr;uĐuDstrok;uŊuENG;uÐuETHuETH;uÉuEacuteuEacute;uĚuEcaron;uÊuEcircuEcirc;uЭuEcy;uĖuEdot;u𝔈uEfr;uÈuEgraveuEgrave;u∈uElement;uĒuEmacr;u◻uEmptySmallSquare;u▫uEmptyVerySmallSquare;uĘuEogon;u𝔼uEopf;uΕuEpsilon;u⩵uEqual;u≂u EqualTilde;u⇌u Equilibrium;uℰuEscr;u⩳uEsim;uΗuEta;uËuEumluEuml;u∃uExists;uⅇu 
ExponentialE;uФuFcy;u𝔉uFfr;u◼uFilledSmallSquare;u▪uFilledVerySmallSquare;u𝔽uFopf;u∀uForAll;uℱu Fouriertrf;uFscr;uЃuGJcy;u>uGTuGT;uΓuGamma;uϜuGammad;uĞuGbreve;uĢuGcedil;uĜuGcirc;uГuGcy;uĠuGdot;u𝔊uGfr;u⋙uGg;u𝔾uGopf;u≥u GreaterEqual;u⋛uGreaterEqualLess;u≧uGreaterFullEqual;u⪢uGreaterGreater;u≷u GreaterLess;u⩾uGreaterSlantEqual;u≳u GreaterTilde;u𝒢uGscr;u≫uGt;uЪuHARDcy;uˇuHacek;u^uHat;uĤuHcirc;uℌuHfr;uℋu HilbertSpace;uℍuHopf;u─uHorizontalLine;uHscr;uĦuHstrok;u HumpDownHump;u≏u HumpEqual;uЕuIEcy;uIJuIJlig;uЁuIOcy;uÍuIacuteuIacute;uÎuIcircuIcirc;uИuIcy;uİuIdot;uℑuIfr;uÌuIgraveuIgrave;uIm;uĪuImacr;uⅈu ImaginaryI;uImplies;u∬uInt;u∫u Integral;u⋂u Intersection;u⁣uInvisibleComma;u⁢uInvisibleTimes;uĮuIogon;u𝕀uIopf;uΙuIota;uℐuIscr;uĨuItilde;uІuIukcy;uÏuIumluIuml;uĴuJcirc;uЙuJcy;u𝔍uJfr;u𝕁uJopf;u𝒥uJscr;uЈuJsercy;uЄuJukcy;uХuKHcy;uЌuKJcy;uΚuKappa;uĶuKcedil;uКuKcy;u𝔎uKfr;u𝕂uKopf;u𝒦uKscr;uЉuLJcy;u⃒unvgt;u⧞unvinfin;u⤂unvlArr;u≤⃒unvle;u<⃒unvlt;u⊴⃒unvltrie;u⤃unvrArr;u⊵⃒unvrtrie;u∼⃒unvsim;u⇖unwArr;u⤣unwarhk;unwarr;unwarrow;u⤧unwnear;uoS;uóuoacuteuoacute;uoast;uocir;uôuocircuocirc;uоuocy;uodash;uőuodblac;u⨸uodiv;uodot;u⦼uodsold;uœuoelig;u⦿uofcir;u𝔬uofr;u˛uogon;uòuograveuograve;u⧁uogt;u⦵uohbar;uohm;uoint;uolarr;u⦾uolcir;u⦻uolcross;uoline;u⧀uolt;uōuomacr;uωuomega;uοuomicron;u⦶uomid;uominus;u𝕠uoopf;u⦷uopar;u⦹uoperp;uoplus;u∨uor;uorarr;u⩝uord;uℴuorder;uorderof;uªuordfuordf;uºuordmuordm;u⊶uorigof;u⩖uoror;u⩗uorslope;u⩛uorv;uoscr;uøuoslashuoslash;u⊘uosol;uõuotildeuotilde;uotimes;u⨶u otimesas;uöuoumluouml;u⌽uovbar;upar;u¶uparaupara;u parallel;u⫳uparsim;u⫽uparsl;upart;uпupcy;u%upercnt;u.uperiod;u‰upermil;uperp;u‱upertenk;u𝔭upfr;uφuphi;uϕuphiv;uphmmat;u☎uphone;uπupi;u pitchfork;uϖupiv;uplanck;uℎuplanckh;uplankv;u+uplus;u⨣u plusacir;uplusb;u⨢upluscir;uplusdo;u⨥uplusdu;u⩲upluse;uplusmnuplusmn;u⨦uplussim;u⨧uplustwo;upm;u⨕u pointint;u𝕡upopf;u£upoundupound;upr;u⪳uprE;u⪷uprap;uprcue;upre;uprec;u precapprox;u preccurlyeq;upreceq;u⪹u precnapprox;u⪵u precneqq;u⋨u 
precnsim;uprecsim;u′uprime;uprimes;uprnE;uprnap;uprnsim;uprod;u⌮u profalar;u⌒u profline;u⌓u profsurf;uprop;upropto;uprsim;u⊰uprurel;u𝓅upscr;uψupsi;u upuncsp;u𝔮uqfr;uqint;u𝕢uqopf;u⁗uqprime;u𝓆uqscr;u quaternions;u⨖uquatint;u?uquest;uquesteq;uquoturAarr;urArr;u⤜urAtail;urBarr;u⥤urHar;u∽̱urace;uŕuracute;uradic;u⦳u raemptyv;urang;u⦒urangd;u⦥urange;urangle;u»uraquouraquo;urarr;u⥵urarrap;urarrb;u⤠urarrbfs;u⤳urarrc;u⤞urarrfs;urarrhk;urarrlp;u⥅urarrpl;u⥴urarrsim;u↣urarrtl;u↝urarrw;u⤚uratail;u∶uratio;u rationals;urbarr;u❳urbbrk;u}urbrace;u]urbrack;u⦌urbrke;u⦎urbrksld;u⦐urbrkslu;uřurcaron;uŗurcedil;urceil;urcub;uрurcy;u⤷urdca;u⥩urdldhar;urdquo;urdquor;u↳urdsh;ureal;urealine;u realpart;ureals;u▭urect;uregureg;u⥽urfisht;urfloor;u𝔯urfr;urhard;urharu;u⥬urharul;uρurho;uϱurhov;u rightarrow;urightarrowtail;urightharpoondown;urightharpoonup;urightleftarrows;urightleftharpoons;u⇉urightrightarrows;urightsquigarrow;u⋌urightthreetimes;u˚uring;u risingdotseq;urlarr;urlhar;u‏urlm;u⎱urmoust;u rmoustache;u⫮urnmid;u⟭uroang;u⇾uroarr;urobrk;u⦆uropar;u𝕣uropf;u⨮uroplus;u⨵urotimes;u)urpar;u⦔urpargt;u⨒u rppolint;urrarr;u›ursaquo;u𝓇urscr;ursh;ursqb;ursquo;ursquor;urthree;u⋊urtimes;u▹urtri;urtrie;urtrif;u⧎u rtriltri;u⥨uruluhar;u℞urx;uśusacute;usbquo;usc;u⪴uscE;u⪸uscap;ušuscaron;usccue;usce;uşuscedil;uŝuscirc;u⪶uscnE;u⪺uscnap;u⋩uscnsim;u⨓u scpolint;uscsim;uсuscy;u⋅usdot;usdotb;u⩦usdote;u⇘useArr;usearhk;usearr;usearrow;u§usectusect;u;usemi;u⤩useswar;u setminus;usetmn;u✶usext;u𝔰usfr;usfrown;u♯usharp;uщushchcy;uшushcy;u shortmid;ushortparallel;u­ushyushy;uσusigma;uςusigmaf;usigmav;usim;u⩪usimdot;usime;usimeq;u⪞usimg;u⪠usimgE;u⪝usiml;u⪟usimlE;u≆usimne;u⨤usimplus;u⥲usimrarr;uslarr;usmallsetminus;u⨳usmashp;u⧤u smeparsl;usmid;u⌣usmile;u⪪usmt;u⪬usmte;u⪬︀usmtes;uьusoftcy;u/usol;u⧄usolb;u⌿usolbar;u𝕤usopf;u♠uspades;u spadesuit;uspar;usqcap;u⊓︀usqcaps;usqcup;u⊔︀usqcups;usqsub;usqsube;u sqsubset;u sqsubseteq;usqsup;usqsupe;u sqsupset;u 
sqsupseteq;usqu;usquare;usquarf;usquf;usrarr;u𝓈usscr;ussetmn;ussmile;usstarf;u☆ustar;ustarf;ustraightepsilon;u straightphi;ustrns;u⊂usub;u⫅usubE;u⪽usubdot;usube;u⫃usubedot;u⫁usubmult;u⫋usubnE;u⊊usubne;u⪿usubplus;u⥹usubrarr;usubset;u subseteq;u subseteqq;u subsetneq;u subsetneqq;u⫇usubsim;u⫕usubsub;u⫓usubsup;usucc;u succapprox;u succcurlyeq;usucceq;u succnapprox;u succneqq;u succnsim;usuccsim;usum;u♪usung;u¹usup1usup1;u²usup2usup2;u³usup3usup3;usup;u⫆usupE;u⪾usupdot;u⫘usupdsub;usupe;u⫄usupedot;u⟉usuphsol;u⫗usuphsub;u⥻usuplarr;u⫂usupmult;u⫌usupnE;u⊋usupne;u⫀usupplus;usupset;u supseteq;u supseteqq;u supsetneq;u supsetneqq;u⫈usupsim;u⫔usupsub;u⫖usupsup;u⇙uswArr;uswarhk;uswarr;uswarrow;u⤪uswnwar;ußuszliguszlig;u⌖utarget;uτutau;utbrk;uťutcaron;uţutcedil;uтutcy;utdot;u⌕utelrec;u𝔱utfr;uthere4;u therefore;uθutheta;uϑu thetasym;uthetav;u thickapprox;u thicksim;uthinsp;uthkap;uthksim;uþuthornuthorn;utilde;u×utimesutimes;utimesb;u⨱u timesbar;u⨰utimesd;utint;utoea;utop;u⌶utopbot;u⫱utopcir;u𝕥utopf;u⫚utopfork;utosa;u‴utprime;utrade;u▵u triangle;u triangledown;u triangleleft;utrianglelefteq;u≜u triangleq;utriangleright;utrianglerighteq;u◬utridot;utrie;u⨺u triminus;u⨹utriplus;u⧍utrisb;u⨻utritime;u⏢u trpezium;u𝓉utscr;uцutscy;uћutshcy;uŧutstrok;utwixt;utwoheadleftarrow;utwoheadrightarrow;uuArr;u⥣uuHar;uúuuacuteuuacute;uuarr;uўuubrcy;uŭuubreve;uûuucircuucirc;uуuucy;uudarr;uűuudblac;uudhar;u⥾uufisht;u𝔲uufr;uùuugraveuugrave;uuharl;uuharr;u▀uuhblk;u⌜uulcorn;u ulcorner;u⌏uulcrop;u◸uultri;uūuumacr;uumluuml;uųuuogon;u𝕦uuopf;uuparrow;u updownarrow;uupharpoonleft;uupharpoonright;uuplus;uυuupsi;uupsih;uupsilon;u⇈u upuparrows;u⌝uurcorn;u urcorner;u⌎uurcrop;uůuuring;u◹uurtri;u𝓊uuscr;u⋰uutdot;uũuutilde;uutri;uutrif;uuuarr;uüuuumluuuml;u⦧uuwangle;uvArr;u⫨uvBar;u⫩uvBarv;uvDash;u⦜uvangrt;u varepsilon;u varkappa;u varnothing;uvarphi;uvarpi;u varpropto;uvarr;uvarrho;u varsigma;u⊊︀u varsubsetneq;u⫋︀uvarsubsetneqq;u⊋︀u varsupsetneq;u⫌︀uvarsupsetneqq;u 
vartheta;uvartriangleleft;uvartriangleright;uвuvcy;uvdash;uvee;u⊻uveebar;u≚uveeeq;u⋮uvellip;uverbar;uvert;u𝔳uvfr;uvltri;uvnsub;uvnsup;u𝕧uvopf;uvprop;uvrtri;u𝓋uvscr;uvsubnE;uvsubne;uvsupnE;uvsupne;u⦚uvzigzag;uŵuwcirc;u⩟uwedbar;uwedge;u≙uwedgeq;u℘uweierp;u𝔴uwfr;u𝕨uwopf;uwp;uwr;uwreath;u𝓌uwscr;uxcap;uxcirc;uxcup;uxdtri;u𝔵uxfr;uxhArr;uxharr;uξuxi;uxlArr;uxlarr;uxmap;u⋻uxnis;uxodot;u𝕩uxopf;uxoplus;uxotime;uxrArr;uxrarr;u𝓍uxscr;uxsqcup;uxuplus;uxutri;uxvee;uxwedge;uýuyacuteuyacute;uяuyacy;uŷuycirc;uыuycy;u¥uyenuyen;u𝔶uyfr;uїuyicy;u𝕪uyopf;u𝓎uyscr;uюuyucy;uÿuyumluyuml;uźuzacute;užuzcaron;uзuzcy;użuzdot;uzeetrf;uζuzeta;u𝔷uzfr;uжuzhcy;u⇝uzigrarr;u𝕫uzopf;u𝓏uzscr;u‍uzwj;u‌uzwnj;u�i iuiiiiiiiiiiiiuiiuiuiiiiiiiiiiiiiuiiiuDoctypeiu CharactersiuSpaceCharactersiuStartTagiuEndTagiuEmptyTagiuCommentiu ParseErrorumathtDataLossWarningcB`seZRS((t__name__t __module__(((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.pyR| stReparseExceptioncB`seZRS((RR(((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.pyR s(uh1uh2uh3uh4uh5uh6( i ii ii i& i i! 
ii0 i`i9 iRii}iii i i i i" i i ii"!iai: iSii~ix(:t __future__RRRtstringtNonetEOFtEt namespacest frozensettscopingElementstformattingElementstspecialElementsthtmlIntegrationPointElementst"mathmlTextIntegrationPointElementstadjustSVGAttributestadjustMathMLAttributestadjustForeignAttributestdicttitemstqnametprefixtlocaltnstunadjustForeignAttributestspaceCharactersttableInsertModeElementstascii_lowercasetasciiLowercasetascii_uppercasetasciiUppercaset ascii_letterst asciiLetterstdigitst hexdigitst hexDigitstctordtlowertasciiUpper2LowertheadingElementst voidElementst cdataElementstrcdataElementstbooleanAttributestentitiesWindows1252t xmlEntitiestentitiestreplacementCharacterst tokenTypest tagTokenTypestktvtprefixest UserWarningRt ExceptionR(((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.pytsL                                                                                                                      :  4     1 PK.e[xj==html5lib/filters/lint.pyonu[ abc@`sddlmZmZmZddlmZddlmZddlm Z m Z ddlm Z dj e Z d ej fd YZ d S( i(tabsolute_importtdivisiontunicode_literals(t text_typei(tbasei(t namespacest voidElements(tspaceCharactersutFiltercB`seZedZdZRS(cC`s#tt|j|||_dS(N(tsuperRt__init__trequire_matching_tags(tselftsourceR ((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.pyR sc c`sg}xtjj|D]}|d}|dkr|d}|d}| s`|tdkro|tkron|dkr|jr|j||fnx|djD]\\}}}qWn|dkr%|d}|d}| s|tdkr |tkr q|jr|j}qnx|d kr>|d}n_|dkrf|d}|d krqn7|d kr|d}n|d krn|dkrn|VqWdS(NutypeuStartTaguEmptyTagu namespaceunameuhtmludatauEndTaguCommentu CharactersuSpaceCharactersuDoctypeuEntityuSerializerError(uStartTaguEmptyTag(u CharactersuSpaceCharacters( RRt__iter__RRR tappendtitemstpop( R t open_elementsttokenttypet namespacetnametvaluetstarttdata((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.pyRsF    ##   #           (t__name__t __module__tTrueR R(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.pyR s N(t __future__RRRtpip._vendor.sixRtRt 
constantsRRRtjoinR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.pyts PK.e[jsshtml5lib/filters/whitespace.pynu[from __future__ import absolute_import, division, unicode_literals import re from . import base from ..constants import rcdataElements, spaceCharacters spaceCharacters = "".join(spaceCharacters) SPACES_REGEX = re.compile("[%s]+" % spaceCharacters) class Filter(base.Filter): spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements)) def __iter__(self): preserve = 0 for token in base.Filter.__iter__(self): type = token["type"] if type == "StartTag" \ and (preserve or token["name"] in self.spacePreserveElements): preserve += 1 elif type == "EndTag" and preserve: preserve -= 1 elif not preserve and type == "SpaceCharacters" and token["data"]: # Test on token["data"] above to not introduce spaces where there were not token["data"] = " " elif not preserve and type == "Characters": token["data"] = collapse_spaces(token["data"]) yield token def collapse_spaces(text): return SPACES_REGEX.sub(' ', text) PK.e["html5lib/filters/base.pynu[from __future__ import absolute_import, division, unicode_literals class Filter(object): def __init__(self, source): self.source = source def __iter__(self): return iter(self.source) def __getattr__(self, name): return getattr(self.source, name) PK.e[n|mm*html5lib/filters/alphabeticalattributes.pynu[from __future__ import absolute_import, division, unicode_literals from . import base try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict class Filter(base.Filter): def __iter__(self): for token in base.Filter.__iter__(self): if token["type"] in ("StartTag", "EmptyTag"): attrs = OrderedDict() for name, value in sorted(token["data"].items(), key=lambda x: x[0]): attrs[name] = value token["data"] = attrs yield token PK.e[!$&)&) html5lib/filters/optionaltags.pynu[from __future__ import absolute_import, division, unicode_literals from . 
import base class Filter(base.Filter): def slider(self): previous1 = previous2 = None for token in self.source: if previous1 is not None: yield previous2, previous1, token previous2 = previous1 previous1 = token if previous1 is not None: yield previous2, previous1, None def __iter__(self): for previous, token, next in self.slider(): type = token["type"] if type == "StartTag": if (token["data"] or not self.is_optional_start(token["name"], previous, next)): yield token elif type == "EndTag": if not self.is_optional_end(token["name"], next): yield token else: yield token def is_optional_start(self, tagname, previous, next): type = next and next["type"] or None if tagname in 'html': # An html element's start tag may be omitted if the first thing # inside the html element is not a space character or a comment. return type not in ("Comment", "SpaceCharacters") elif tagname == 'head': # A head element's start tag may be omitted if the first thing # inside the head element is an element. # XXX: we also omit the start tag if the head element is empty if type in ("StartTag", "EmptyTag"): return True elif type == "EndTag": return next["name"] == "head" elif tagname == 'body': # A body element's start tag may be omitted if the first thing # inside the body element is not a space character or a comment, # except if the first thing inside the body element is a script # or style element and the node immediately preceding the body # element is a head element whose end tag has been omitted. if type in ("Comment", "SpaceCharacters"): return False elif type == "StartTag": # XXX: we do not look at the preceding event, so we never omit # the body element's start tag if it's followed by a script or # a style element. 
return next["name"] not in ('script', 'style') else: return True elif tagname == 'colgroup': # A colgroup element's start tag may be omitted if the first thing # inside the colgroup element is a col element, and if the element # is not immediately preceded by another colgroup element whose # end tag has been omitted. if type in ("StartTag", "EmptyTag"): # XXX: we do not look at the preceding event, so instead we never # omit the colgroup element's end tag when it is immediately # followed by another colgroup element. See is_optional_end. return next["name"] == "col" else: return False elif tagname == 'tbody': # A tbody element's start tag may be omitted if the first thing # inside the tbody element is a tr element, and if the element is # not immediately preceded by a tbody, thead, or tfoot element # whose end tag has been omitted. if type == "StartTag": # omit the thead and tfoot elements' end tag when they are # immediately followed by a tbody element. See is_optional_end. if previous and previous['type'] == 'EndTag' and \ previous['name'] in ('tbody', 'thead', 'tfoot'): return False return next["name"] == 'tr' else: return False return False def is_optional_end(self, tagname, next): type = next and next["type"] or None if tagname in ('html', 'head', 'body'): # An html element's end tag may be omitted if the html element # is not immediately followed by a space character or a comment. return type not in ("Comment", "SpaceCharacters") elif tagname in ('li', 'optgroup', 'tr'): # A li element's end tag may be omitted if the li element is # immediately followed by another li element or if there is # no more content in the parent element. # An optgroup element's end tag may be omitted if the optgroup # element is immediately followed by another optgroup element, # or if there is no more content in the parent element. 
# A tr element's end tag may be omitted if the tr element is # immediately followed by another tr element, or if there is # no more content in the parent element. if type == "StartTag": return next["name"] == tagname else: return type == "EndTag" or type is None elif tagname in ('dt', 'dd'): # A dt element's end tag may be omitted if the dt element is # immediately followed by another dt element or a dd element. # A dd element's end tag may be omitted if the dd element is # immediately followed by another dd element or a dt element, # or if there is no more content in the parent element. if type == "StartTag": return next["name"] in ('dt', 'dd') elif tagname == 'dd': return type == "EndTag" or type is None else: return False elif tagname == 'p': # A p element's end tag may be omitted if the p element is # immediately followed by an address, article, aside, # blockquote, datagrid, dialog, dir, div, dl, fieldset, # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu, # nav, ol, p, pre, section, table, or ul, element, or if # there is no more content in the parent element. if type in ("StartTag", "EmptyTag"): return next["name"] in ('address', 'article', 'aside', 'blockquote', 'datagrid', 'dialog', 'dir', 'div', 'dl', 'fieldset', 'footer', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'header', 'hr', 'menu', 'nav', 'ol', 'p', 'pre', 'section', 'table', 'ul') else: return type == "EndTag" or type is None elif tagname == 'option': # An option element's end tag may be omitted if the option # element is immediately followed by another option element, # or if it is immediately followed by an optgroup # element, or if there is no more content in the parent # element. if type == "StartTag": return next["name"] in ('option', 'optgroup') else: return type == "EndTag" or type is None elif tagname in ('rt', 'rp'): # An rt element's end tag may be omitted if the rt element is # immediately followed by an rt or rp element, or if there is # no more content in the parent element. 
# An rp element's end tag may be omitted if the rp element is # immediately followed by an rt or rp element, or if there is # no more content in the parent element. if type == "StartTag": return next["name"] in ('rt', 'rp') else: return type == "EndTag" or type is None elif tagname == 'colgroup': # A colgroup element's end tag may be omitted if the colgroup # element is not immediately followed by a space character or # a comment. if type in ("Comment", "SpaceCharacters"): return False elif type == "StartTag": # XXX: we also look for an immediately following colgroup # element. See is_optional_start. return next["name"] != 'colgroup' else: return True elif tagname in ('thead', 'tbody'): # A thead element's end tag may be omitted if the thead element # is immediately followed by a tbody or tfoot element. # A tbody element's end tag may be omitted if the tbody element # is immediately followed by a tbody or tfoot element, or if # there is no more content in the parent element. # A tfoot element's end tag may be omitted if the tfoot element # is immediately followed by a tbody element, or if there is no # more content in the parent element. # XXX: we never omit the end tag when the following element is # a tbody. See is_optional_start. if type == "StartTag": return next["name"] in ['tbody', 'tfoot'] elif tagname == 'tbody': return type == "EndTag" or type is None else: return False elif tagname == 'tfoot': # A tfoot element's end tag may be omitted if the tfoot element # is immediately followed by a tbody element, or if there is no # more content in the parent element. # XXX: we never omit the end tag when the following element is # a tbody. See is_optional_start. if type == "StartTag": return next["name"] == 'tbody' else: return type == "EndTag" or type is None elif tagname in ('td', 'th'): # A td element's end tag may be omitted if the td element is # immediately followed by a td or th element, or if there is # no more content in the parent element. 
# A th element's end tag may be omitted if the th element is # immediately followed by a td or th element, or if there is # no more content in the parent element. if type == "StartTag": return next["name"] in ('td', 'th') else: return type == "EndTag" or type is None return False PK.e[i[$RRhtml5lib/filters/base.pycnu[ abc@`s6ddlmZmZmZdefdYZdS(i(tabsolute_importtdivisiontunicode_literalstFiltercB`s#eZdZdZdZRS(cC`s ||_dS(N(tsource(tselfR((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.pyt__init__scC`s t|jS(N(titerR(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.pyt__iter__scC`st|j|S(N(tgetattrR(Rtname((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.pyt __getattr__ s(t__name__t __module__RRR (((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.pyRs  N(t __future__RRRtobjectR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.pytsPK.e[}ҵ% % html5lib/filters/lint.pynu[from __future__ import absolute_import, division, unicode_literals from pip._vendor.six import text_type from . 
import base from ..constants import namespaces, voidElements from ..constants import spaceCharacters spaceCharacters = "".join(spaceCharacters) class Filter(base.Filter): def __init__(self, source, require_matching_tags=True): super(Filter, self).__init__(source) self.require_matching_tags = require_matching_tags def __iter__(self): open_elements = [] for token in base.Filter.__iter__(self): type = token["type"] if type in ("StartTag", "EmptyTag"): namespace = token["namespace"] name = token["name"] assert namespace is None or isinstance(namespace, text_type) assert namespace != "" assert isinstance(name, text_type) assert name != "" assert isinstance(token["data"], dict) if (not namespace or namespace == namespaces["html"]) and name in voidElements: assert type == "EmptyTag" else: assert type == "StartTag" if type == "StartTag" and self.require_matching_tags: open_elements.append((namespace, name)) for (namespace, name), value in token["data"].items(): assert namespace is None or isinstance(namespace, text_type) assert namespace != "" assert isinstance(name, text_type) assert name != "" assert isinstance(value, text_type) elif type == "EndTag": namespace = token["namespace"] name = token["name"] assert namespace is None or isinstance(namespace, text_type) assert namespace != "" assert isinstance(name, text_type) assert name != "" if (not namespace or namespace == namespaces["html"]) and name in voidElements: assert False, "Void element reported as EndTag token: %(tag)s" % {"tag": name} elif self.require_matching_tags: start = open_elements.pop() assert start == (namespace, name) elif type == "Comment": data = token["data"] assert isinstance(data, text_type) elif type in ("Characters", "SpaceCharacters"): data = token["data"] assert isinstance(data, text_type) assert data != "" if type == "SpaceCharacters": assert data.strip(spaceCharacters) == "" elif type == "Doctype": name = token["name"] assert name is None or isinstance(name, text_type) assert 
token["publicId"] is None or isinstance(name, text_type) assert token["systemId"] is None or isinstance(name, text_type) elif type == "Entity": assert isinstance(token["name"], text_type) elif type == "SerializerError": assert isinstance(token["data"], text_type) else: assert False, "Unknown token type: %(type)s" % {"type": type} yield token PK.e[.qo6 html5lib/filters/lint.pycnu[ abc@`sddlmZmZmZddlmZddlmZddlm Z m Z ddlm Z dj e Z d ej fd YZ d S( i(tabsolute_importtdivisiontunicode_literals(t text_typei(tbasei(t namespacest voidElements(tspaceCharactersutFiltercB`seZedZdZRS(cC`s#tt|j|||_dS(N(tsuperRt__init__trequire_matching_tags(tselftsourceR ((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.pyR sc c`sPg}xCtjj|D]/}|d}|dkr|d}|d}|dksjt|tsjt|dks|tt|tst|dkstt|dtst| s|tdkr|t kr|dkstn|dkst|dkr1|j r1|j ||fnx|dj D]\\}}}|dksut|tsut|dkstt|tst|dkstt|tsBtqBWny|d kr|d}|d}|dks t|ts t|dkstt|ts2t|dksDt| s[|tdkr|t krt std i|d 6qC|j rC|j}|||fkstqCn|d kr|d}t|tsCtn[|dkrR|d}t|tst|dks%t|dkrC|jtdksOtqCn|dkr|d}|dkst|tst|ddkst|tst|ddksCt|tsCtnm|dkrt|dtsCtnE|dkr&t|dtsCtnt sCtdi|d6|VqWdS(NutypeuStartTaguEmptyTagu namespaceunameuudatauhtmluEndTagu.Void element reported as EndTag token: %(tag)sutaguCommentu CharactersuSpaceCharactersuDoctypeupublicIdusystemIduEntityuSerializerErroruUnknown token type: %(type)s(uStartTaguEmptyTag(u CharactersuSpaceCharacters(RRt__iter__tNonet isinstanceRtAssertionErrortdictRRR tappendtitemstFalsetpoptstripR( R t open_elementsttokenttypet namespacetnametvaluetstarttdata((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.pyRsl    !##!   !#        !  
!%(  (t__name__t __module__tTrueR R(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.pyR s N(t __future__RRRtpip._vendor.sixRtRt constantsRRRtjoinR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.pyts PK.e[mhtml5lib/filters/__init__.pycnu[ abc@sdS(N((((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/__init__.pyttPK.e[viåhtml5lib/filters/whitespace.pyonu[ abc@`sddlmZmZmZddlZddlmZddlmZm Z dj e Z ej de Z d ej fd YZ d ZdS( i(tabsolute_importtdivisiontunicode_literalsNi(tbasei(trcdataElementstspaceCharactersuu[%s]+tFiltercB`s-eZeddgeeZdZRS(upreutextareacc`sd}xtjj|D]}|d}|dkr[|sN|d|jkr[|d7}ns|dkrz|rz|d8}nT| r|dkr|drd |ds PK.e[\44!html5lib/filters/optionaltags.pycnu[ abc@`sIddlmZmZmZddlmZdejfdYZdS(i(tabsolute_importtdivisiontunicode_literalsi(tbasetFiltercB`s,eZdZdZdZdZRS(cc`shd}}x:|jD]/}|dk r7|||fVn|}|}qW|dk rd||dfVndS(N(tNonetsource(tselft previous1t previous2ttoken((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pytsliders    cc`sx|jD]\}}}|d}|dkra|dsV|j|d|| r|Vqq |dkr|j|d|s|Vqq |Vq WdS(NutypeuStartTagudataunameuEndTag(R tis_optional_starttis_optional_end(RtpreviousR tnextttype((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pyt__iter__s      cC`s*|r|dpd}|dkr,|dkS|dkre|dkrHtS|dkr&|d dkSn|d kr|dkrtS|dkr|d dkStSn|d kr|dkr|d dkStSnW|dkr&|dkr|r|ddkr|d dkrtS|d dkStSntS(NutypeuhtmluCommentuSpaceCharactersuheaduStartTaguEmptyTaguEndTagunameubodyuscriptustyleucolgroupucolutbodyutheadutfootutr(uCommentuSpaceCharacters(uStartTaguEmptyTag(uCommentuSpaceCharacters(uscriptustyle(uStartTaguEmptyTag(utbodyutheadutfoot(RtTruetFalse(RttagnameRRR((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pyR s4            cC`s|r|dpd}|d5kr,|d6kS|d7krk|d krR|d |kS|d kpg|dkSn|d8kr|d kr|d d9kS|dkr|d kp|dkStSn|dkr|d:kr|d d;kS|d kp|dkSn|d,kr8|d kr|d d<kS|d kp4|dkSnI|d=krw|d kr^|d d>kS|d kps|dkSn |d/kr|d?krtS|d kr|d d/kStSn|d@kr|d kr|d dAkS|d1kr|d 
kp|dkStSn~|d2krB|d kr)|d d1kS|d kp>|dkSn?|dBkr|d krh|d dCkS|d kp}|dkSntS(DNutypeuhtmluheadubodyuCommentuSpaceCharactersuliuoptgrouputruStartTagunameuEndTagudtuddupuEmptyTaguaddressuarticleuasideu blockquoteudatagridudialogudirudivudlufieldsetufooteruformuh1uh2uh3uh4uh5uh6uheaderuhrumenuunavuolupreusectionutableuuluoptionurturpucolgrouputheadutbodyutfootutduth(uhtmluheadubody(uCommentuSpaceCharacters(uliuoptgrouputr(udtudd(udtudd(uStartTaguEmptyTag(uaddressuarticleuasideu blockquoteudatagridudialogudirudivudlufieldsetufooteruformuh1uh2uh3uh4uh5uh6uheaderuhrumenuunavuolupupreusectionutableuul(uoptionuoptgroup(urturp(urturp(uCommentuSpaceCharacters(utheadutbody(utbodyutfoot(utduth(utduth(RRR(RRRR((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pyR Wsf                     (t__name__t __module__R RR R (((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pyRs 9N(t __future__RRRtRR(((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pytsPK.e[Ubrr+html5lib/filters/alphabeticalattributes.pycnu[ abc@`sddlmZmZmZddlmZyddlmZWn!ek rcddl mZnXdej fdYZ dS(i(tabsolute_importtdivisiontunicode_literalsi(tbase(t OrderedDicttFiltercB`seZdZRS(cc`sxtjj|D]k}|ddkryt}x7t|djddD]\}}|||t(uStartTaguEmptyTag(RRt__iter__Rtsortedtitems(tselfttokentattrstnametvalue((sW/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.pyR s  (t__name__t __module__R (((sW/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.pyR sN( t __future__RRRR Rt collectionsRt ImportErrort ordereddictR(((sW/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.pyts  PK.e[viåhtml5lib/filters/whitespace.pycnu[ abc@`sddlmZmZmZddlZddlmZddlmZm Z dj e Z ej de Z d ej fd YZ d ZdS( 
i(tabsolute_importtdivisiontunicode_literalsNi(tbasei(trcdataElementstspaceCharactersuu[%s]+tFiltercB`s-eZeddgeeZdZRS(upreutextareacc`sd}xtjj|D]}|d}|dkr[|sN|d|jkr[|d7}ns|dkrz|rz|d8}nT| r|dkr|drd |ds PK.e[Tbbhtml5lib/filters/sanitizer.pynu[from __future__ import absolute_import, division, unicode_literals import re from xml.sax.saxutils import escape, unescape from pip._vendor.six.moves import urllib_parse as urlparse from . import base from ..constants import namespaces, prefixes __all__ = ["Filter"] allowed_elements = frozenset(( (namespaces['html'], 'a'), (namespaces['html'], 'abbr'), (namespaces['html'], 'acronym'), (namespaces['html'], 'address'), (namespaces['html'], 'area'), (namespaces['html'], 'article'), (namespaces['html'], 'aside'), (namespaces['html'], 'audio'), (namespaces['html'], 'b'), (namespaces['html'], 'big'), (namespaces['html'], 'blockquote'), (namespaces['html'], 'br'), (namespaces['html'], 'button'), (namespaces['html'], 'canvas'), (namespaces['html'], 'caption'), (namespaces['html'], 'center'), (namespaces['html'], 'cite'), (namespaces['html'], 'code'), (namespaces['html'], 'col'), (namespaces['html'], 'colgroup'), (namespaces['html'], 'command'), (namespaces['html'], 'datagrid'), (namespaces['html'], 'datalist'), (namespaces['html'], 'dd'), (namespaces['html'], 'del'), (namespaces['html'], 'details'), (namespaces['html'], 'dfn'), (namespaces['html'], 'dialog'), (namespaces['html'], 'dir'), (namespaces['html'], 'div'), (namespaces['html'], 'dl'), (namespaces['html'], 'dt'), (namespaces['html'], 'em'), (namespaces['html'], 'event-source'), (namespaces['html'], 'fieldset'), (namespaces['html'], 'figcaption'), (namespaces['html'], 'figure'), (namespaces['html'], 'footer'), (namespaces['html'], 'font'), (namespaces['html'], 'form'), (namespaces['html'], 'header'), (namespaces['html'], 'h1'), (namespaces['html'], 'h2'), (namespaces['html'], 'h3'), (namespaces['html'], 'h4'), (namespaces['html'], 'h5'), (namespaces['html'], 'h6'), 
(namespaces['html'], 'hr'), (namespaces['html'], 'i'), (namespaces['html'], 'img'), (namespaces['html'], 'input'), (namespaces['html'], 'ins'), (namespaces['html'], 'keygen'), (namespaces['html'], 'kbd'), (namespaces['html'], 'label'), (namespaces['html'], 'legend'), (namespaces['html'], 'li'), (namespaces['html'], 'm'), (namespaces['html'], 'map'), (namespaces['html'], 'menu'), (namespaces['html'], 'meter'), (namespaces['html'], 'multicol'), (namespaces['html'], 'nav'), (namespaces['html'], 'nextid'), (namespaces['html'], 'ol'), (namespaces['html'], 'output'), (namespaces['html'], 'optgroup'), (namespaces['html'], 'option'), (namespaces['html'], 'p'), (namespaces['html'], 'pre'), (namespaces['html'], 'progress'), (namespaces['html'], 'q'), (namespaces['html'], 's'), (namespaces['html'], 'samp'), (namespaces['html'], 'section'), (namespaces['html'], 'select'), (namespaces['html'], 'small'), (namespaces['html'], 'sound'), (namespaces['html'], 'source'), (namespaces['html'], 'spacer'), (namespaces['html'], 'span'), (namespaces['html'], 'strike'), (namespaces['html'], 'strong'), (namespaces['html'], 'sub'), (namespaces['html'], 'sup'), (namespaces['html'], 'table'), (namespaces['html'], 'tbody'), (namespaces['html'], 'td'), (namespaces['html'], 'textarea'), (namespaces['html'], 'time'), (namespaces['html'], 'tfoot'), (namespaces['html'], 'th'), (namespaces['html'], 'thead'), (namespaces['html'], 'tr'), (namespaces['html'], 'tt'), (namespaces['html'], 'u'), (namespaces['html'], 'ul'), (namespaces['html'], 'var'), (namespaces['html'], 'video'), (namespaces['mathml'], 'maction'), (namespaces['mathml'], 'math'), (namespaces['mathml'], 'merror'), (namespaces['mathml'], 'mfrac'), (namespaces['mathml'], 'mi'), (namespaces['mathml'], 'mmultiscripts'), (namespaces['mathml'], 'mn'), (namespaces['mathml'], 'mo'), (namespaces['mathml'], 'mover'), (namespaces['mathml'], 'mpadded'), (namespaces['mathml'], 'mphantom'), (namespaces['mathml'], 'mprescripts'), (namespaces['mathml'], 
'mroot'), (namespaces['mathml'], 'mrow'), (namespaces['mathml'], 'mspace'), (namespaces['mathml'], 'msqrt'), (namespaces['mathml'], 'mstyle'), (namespaces['mathml'], 'msub'), (namespaces['mathml'], 'msubsup'), (namespaces['mathml'], 'msup'), (namespaces['mathml'], 'mtable'), (namespaces['mathml'], 'mtd'), (namespaces['mathml'], 'mtext'), (namespaces['mathml'], 'mtr'), (namespaces['mathml'], 'munder'), (namespaces['mathml'], 'munderover'), (namespaces['mathml'], 'none'), (namespaces['svg'], 'a'), (namespaces['svg'], 'animate'), (namespaces['svg'], 'animateColor'), (namespaces['svg'], 'animateMotion'), (namespaces['svg'], 'animateTransform'), (namespaces['svg'], 'clipPath'), (namespaces['svg'], 'circle'), (namespaces['svg'], 'defs'), (namespaces['svg'], 'desc'), (namespaces['svg'], 'ellipse'), (namespaces['svg'], 'font-face'), (namespaces['svg'], 'font-face-name'), (namespaces['svg'], 'font-face-src'), (namespaces['svg'], 'g'), (namespaces['svg'], 'glyph'), (namespaces['svg'], 'hkern'), (namespaces['svg'], 'linearGradient'), (namespaces['svg'], 'line'), (namespaces['svg'], 'marker'), (namespaces['svg'], 'metadata'), (namespaces['svg'], 'missing-glyph'), (namespaces['svg'], 'mpath'), (namespaces['svg'], 'path'), (namespaces['svg'], 'polygon'), (namespaces['svg'], 'polyline'), (namespaces['svg'], 'radialGradient'), (namespaces['svg'], 'rect'), (namespaces['svg'], 'set'), (namespaces['svg'], 'stop'), (namespaces['svg'], 'svg'), (namespaces['svg'], 'switch'), (namespaces['svg'], 'text'), (namespaces['svg'], 'title'), (namespaces['svg'], 'tspan'), (namespaces['svg'], 'use'), )) allowed_attributes = frozenset(( # HTML attributes (None, 'abbr'), (None, 'accept'), (None, 'accept-charset'), (None, 'accesskey'), (None, 'action'), (None, 'align'), (None, 'alt'), (None, 'autocomplete'), (None, 'autofocus'), (None, 'axis'), (None, 'background'), (None, 'balance'), (None, 'bgcolor'), (None, 'bgproperties'), (None, 'border'), (None, 'bordercolor'), (None, 'bordercolordark'), (None, 
'bordercolorlight'), (None, 'bottompadding'), (None, 'cellpadding'), (None, 'cellspacing'), (None, 'ch'), (None, 'challenge'), (None, 'char'), (None, 'charoff'), (None, 'choff'), (None, 'charset'), (None, 'checked'), (None, 'cite'), (None, 'class'), (None, 'clear'), (None, 'color'), (None, 'cols'), (None, 'colspan'), (None, 'compact'), (None, 'contenteditable'), (None, 'controls'), (None, 'coords'), (None, 'data'), (None, 'datafld'), (None, 'datapagesize'), (None, 'datasrc'), (None, 'datetime'), (None, 'default'), (None, 'delay'), (None, 'dir'), (None, 'disabled'), (None, 'draggable'), (None, 'dynsrc'), (None, 'enctype'), (None, 'end'), (None, 'face'), (None, 'for'), (None, 'form'), (None, 'frame'), (None, 'galleryimg'), (None, 'gutter'), (None, 'headers'), (None, 'height'), (None, 'hidefocus'), (None, 'hidden'), (None, 'high'), (None, 'href'), (None, 'hreflang'), (None, 'hspace'), (None, 'icon'), (None, 'id'), (None, 'inputmode'), (None, 'ismap'), (None, 'keytype'), (None, 'label'), (None, 'leftspacing'), (None, 'lang'), (None, 'list'), (None, 'longdesc'), (None, 'loop'), (None, 'loopcount'), (None, 'loopend'), (None, 'loopstart'), (None, 'low'), (None, 'lowsrc'), (None, 'max'), (None, 'maxlength'), (None, 'media'), (None, 'method'), (None, 'min'), (None, 'multiple'), (None, 'name'), (None, 'nohref'), (None, 'noshade'), (None, 'nowrap'), (None, 'open'), (None, 'optimum'), (None, 'pattern'), (None, 'ping'), (None, 'point-size'), (None, 'poster'), (None, 'pqg'), (None, 'preload'), (None, 'prompt'), (None, 'radiogroup'), (None, 'readonly'), (None, 'rel'), (None, 'repeat-max'), (None, 'repeat-min'), (None, 'replace'), (None, 'required'), (None, 'rev'), (None, 'rightspacing'), (None, 'rows'), (None, 'rowspan'), (None, 'rules'), (None, 'scope'), (None, 'selected'), (None, 'shape'), (None, 'size'), (None, 'span'), (None, 'src'), (None, 'start'), (None, 'step'), (None, 'style'), (None, 'summary'), (None, 'suppress'), (None, 'tabindex'), (None, 'target'), (None, 
'template'), (None, 'title'), (None, 'toppadding'), (None, 'type'), (None, 'unselectable'), (None, 'usemap'), (None, 'urn'), (None, 'valign'), (None, 'value'), (None, 'variable'), (None, 'volume'), (None, 'vspace'), (None, 'vrml'), (None, 'width'), (None, 'wrap'), (namespaces['xml'], 'lang'), # MathML attributes (None, 'actiontype'), (None, 'align'), (None, 'columnalign'), (None, 'columnalign'), (None, 'columnalign'), (None, 'columnlines'), (None, 'columnspacing'), (None, 'columnspan'), (None, 'depth'), (None, 'display'), (None, 'displaystyle'), (None, 'equalcolumns'), (None, 'equalrows'), (None, 'fence'), (None, 'fontstyle'), (None, 'fontweight'), (None, 'frame'), (None, 'height'), (None, 'linethickness'), (None, 'lspace'), (None, 'mathbackground'), (None, 'mathcolor'), (None, 'mathvariant'), (None, 'mathvariant'), (None, 'maxsize'), (None, 'minsize'), (None, 'other'), (None, 'rowalign'), (None, 'rowalign'), (None, 'rowalign'), (None, 'rowlines'), (None, 'rowspacing'), (None, 'rowspan'), (None, 'rspace'), (None, 'scriptlevel'), (None, 'selection'), (None, 'separator'), (None, 'stretchy'), (None, 'width'), (None, 'width'), (namespaces['xlink'], 'href'), (namespaces['xlink'], 'show'), (namespaces['xlink'], 'type'), # SVG attributes (None, 'accent-height'), (None, 'accumulate'), (None, 'additive'), (None, 'alphabetic'), (None, 'arabic-form'), (None, 'ascent'), (None, 'attributeName'), (None, 'attributeType'), (None, 'baseProfile'), (None, 'bbox'), (None, 'begin'), (None, 'by'), (None, 'calcMode'), (None, 'cap-height'), (None, 'class'), (None, 'clip-path'), (None, 'color'), (None, 'color-rendering'), (None, 'content'), (None, 'cx'), (None, 'cy'), (None, 'd'), (None, 'dx'), (None, 'dy'), (None, 'descent'), (None, 'display'), (None, 'dur'), (None, 'end'), (None, 'fill'), (None, 'fill-opacity'), (None, 'fill-rule'), (None, 'font-family'), (None, 'font-size'), (None, 'font-stretch'), (None, 'font-style'), (None, 'font-variant'), (None, 'font-weight'), (None, 'from'), 
(None, 'fx'), (None, 'fy'), (None, 'g1'), (None, 'g2'), (None, 'glyph-name'), (None, 'gradientUnits'), (None, 'hanging'), (None, 'height'), (None, 'horiz-adv-x'), (None, 'horiz-origin-x'), (None, 'id'), (None, 'ideographic'), (None, 'k'), (None, 'keyPoints'), (None, 'keySplines'), (None, 'keyTimes'), (None, 'lang'), (None, 'marker-end'), (None, 'marker-mid'), (None, 'marker-start'), (None, 'markerHeight'), (None, 'markerUnits'), (None, 'markerWidth'), (None, 'mathematical'), (None, 'max'), (None, 'min'), (None, 'name'), (None, 'offset'), (None, 'opacity'), (None, 'orient'), (None, 'origin'), (None, 'overline-position'), (None, 'overline-thickness'), (None, 'panose-1'), (None, 'path'), (None, 'pathLength'), (None, 'points'), (None, 'preserveAspectRatio'), (None, 'r'), (None, 'refX'), (None, 'refY'), (None, 'repeatCount'), (None, 'repeatDur'), (None, 'requiredExtensions'), (None, 'requiredFeatures'), (None, 'restart'), (None, 'rotate'), (None, 'rx'), (None, 'ry'), (None, 'slope'), (None, 'stemh'), (None, 'stemv'), (None, 'stop-color'), (None, 'stop-opacity'), (None, 'strikethrough-position'), (None, 'strikethrough-thickness'), (None, 'stroke'), (None, 'stroke-dasharray'), (None, 'stroke-dashoffset'), (None, 'stroke-linecap'), (None, 'stroke-linejoin'), (None, 'stroke-miterlimit'), (None, 'stroke-opacity'), (None, 'stroke-width'), (None, 'systemLanguage'), (None, 'target'), (None, 'text-anchor'), (None, 'to'), (None, 'transform'), (None, 'type'), (None, 'u1'), (None, 'u2'), (None, 'underline-position'), (None, 'underline-thickness'), (None, 'unicode'), (None, 'unicode-range'), (None, 'units-per-em'), (None, 'values'), (None, 'version'), (None, 'viewBox'), (None, 'visibility'), (None, 'width'), (None, 'widths'), (None, 'x'), (None, 'x-height'), (None, 'x1'), (None, 'x2'), (namespaces['xlink'], 'actuate'), (namespaces['xlink'], 'arcrole'), (namespaces['xlink'], 'href'), (namespaces['xlink'], 'role'), (namespaces['xlink'], 'show'), (namespaces['xlink'], 'title'), 
(namespaces['xlink'], 'type'), (namespaces['xml'], 'base'), (namespaces['xml'], 'lang'), (namespaces['xml'], 'space'), (None, 'y'), (None, 'y1'), (None, 'y2'), (None, 'zoomAndPan'), )) attr_val_is_uri = frozenset(( (None, 'href'), (None, 'src'), (None, 'cite'), (None, 'action'), (None, 'longdesc'), (None, 'poster'), (None, 'background'), (None, 'datasrc'), (None, 'dynsrc'), (None, 'lowsrc'), (None, 'ping'), (namespaces['xlink'], 'href'), (namespaces['xml'], 'base'), )) svg_attr_val_allows_ref = frozenset(( (None, 'clip-path'), (None, 'color-profile'), (None, 'cursor'), (None, 'fill'), (None, 'filter'), (None, 'marker'), (None, 'marker-start'), (None, 'marker-mid'), (None, 'marker-end'), (None, 'mask'), (None, 'stroke'), )) svg_allow_local_href = frozenset(( (None, 'altGlyph'), (None, 'animate'), (None, 'animateColor'), (None, 'animateMotion'), (None, 'animateTransform'), (None, 'cursor'), (None, 'feImage'), (None, 'filter'), (None, 'linearGradient'), (None, 'pattern'), (None, 'radialGradient'), (None, 'textpath'), (None, 'tref'), (None, 'set'), (None, 'use') )) allowed_css_properties = frozenset(( 'azimuth', 'background-color', 'border-bottom-color', 'border-collapse', 'border-color', 'border-left-color', 'border-right-color', 'border-top-color', 'clear', 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness', 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', 'white-space', 'width', )) allowed_css_keywords = frozenset(( 'auto', 'aqua', 'black', 'block', 'blue', 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', 'lime', 
'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive', 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', 'transparent', 'underline', 'white', 'yellow', )) allowed_svg_properties = frozenset(( 'fill', 'fill-opacity', 'fill-rule', 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', 'stroke-opacity', )) allowed_protocols = frozenset(( 'ed2k', 'ftp', 'http', 'https', 'irc', 'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal', 'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag', 'ssh', 'sftp', 'rtsp', 'afs', 'data', )) allowed_content_types = frozenset(( 'image/png', 'image/jpeg', 'image/gif', 'image/webp', 'image/bmp', 'text/plain', )) data_content_type = re.compile(r''' ^ # Match a content type / (?P[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+) # Match any character set and encoding (?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?) |(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?) # Assume the rest is data ,.* $ ''', re.VERBOSE) class Filter(base.Filter): """ sanitization of XHTML+MathML+SVG and of inline style attributes.""" def __init__(self, source, allowed_elements=allowed_elements, allowed_attributes=allowed_attributes, allowed_css_properties=allowed_css_properties, allowed_css_keywords=allowed_css_keywords, allowed_svg_properties=allowed_svg_properties, allowed_protocols=allowed_protocols, allowed_content_types=allowed_content_types, attr_val_is_uri=attr_val_is_uri, svg_attr_val_allows_ref=svg_attr_val_allows_ref, svg_allow_local_href=svg_allow_local_href): super(Filter, self).__init__(source) self.allowed_elements = allowed_elements self.allowed_attributes = allowed_attributes self.allowed_css_properties = allowed_css_properties self.allowed_css_keywords = allowed_css_keywords self.allowed_svg_properties = allowed_svg_properties self.allowed_protocols = allowed_protocols self.allowed_content_types = allowed_content_types self.attr_val_is_uri = attr_val_is_uri self.svg_attr_val_allows_ref = svg_attr_val_allows_ref 
self.svg_allow_local_href = svg_allow_local_href def __iter__(self): for token in base.Filter.__iter__(self): token = self.sanitize_token(token) if token: yield token # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and # stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style # attributes are parsed, and a restricted set, # specified by # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through. # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified # in ALLOWED_PROTOCOLS are allowed. # # sanitize_html('') # => <script> do_nasty_stuff() </script> # sanitize_html('
Click here for $100') # => Click here for $100 def sanitize_token(self, token): # accommodate filters which use token_type differently token_type = token["type"] if token_type in ("StartTag", "EndTag", "EmptyTag"): name = token["name"] namespace = token["namespace"] if ((namespace, name) in self.allowed_elements or (namespace is None and (namespaces["html"], name) in self.allowed_elements)): return self.allowed_token(token) else: return self.disallowed_token(token) elif token_type == "Comment": pass else: return token def allowed_token(self, token): if "data" in token: attrs = token["data"] attr_names = set(attrs.keys()) # Remove forbidden attributes for to_remove in (attr_names - self.allowed_attributes): del token["data"][to_remove] attr_names.remove(to_remove) # Remove attributes with disallowed URL values for attr in (attr_names & self.attr_val_is_uri): assert attr in attrs # I don't have a clue where this regexp comes from or why it matches those # characters, nor why we call unescape. I just know it's always been here. # Should you be worried by this comment in a sanitizer? Yes. On the other hand, all # this will do is remove *more* than it otherwise would. 
val_unescaped = re.sub("[`\x00-\x20\x7f-\xa0\s]+", '', unescape(attrs[attr])).lower() # remove replacement characters from unescaped characters val_unescaped = val_unescaped.replace("\ufffd", "") try: uri = urlparse.urlparse(val_unescaped) except ValueError: uri = None del attrs[attr] if uri and uri.scheme: if uri.scheme not in self.allowed_protocols: del attrs[attr] if uri.scheme == 'data': m = data_content_type.match(uri.path) if not m: del attrs[attr] elif m.group('content_type') not in self.allowed_content_types: del attrs[attr] for attr in self.svg_attr_val_allows_ref: if attr in attrs: attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)', ' ', unescape(attrs[attr])) if (token["name"] in self.svg_allow_local_href and (namespaces['xlink'], 'href') in attrs and re.search('^\s*[^#\s].*', attrs[(namespaces['xlink'], 'href')])): del attrs[(namespaces['xlink'], 'href')] if (None, 'style') in attrs: attrs[(None, 'style')] = self.sanitize_css(attrs[(None, 'style')]) token["data"] = attrs return token def disallowed_token(self, token): token_type = token["type"] if token_type == "EndTag": token["data"] = "" % token["name"] elif token["data"]: assert token_type in ("StartTag", "EmptyTag") attrs = [] for (ns, name), v in token["data"].items(): attrs.append(' %s="%s"' % (name if ns is None else "%s:%s" % (prefixes[ns], name), escape(v))) token["data"] = "<%s%s>" % (token["name"], ''.join(attrs)) else: token["data"] = "<%s>" % token["name"] if token.get("selfClosing"): token["data"] = token["data"][:-1] + "/>" token["type"] = "Characters" del token["name"] return token def sanitize_css(self, style): # disallow urls style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style) # gauntlet if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return '' if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): return '' clean = [] for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style): if not value: continue if prop.lower() 
in self.allowed_css_properties: clean.append(prop + ': ' + value + ';') elif prop.split('-')[0].lower() in ['background', 'border', 'margin', 'padding']: for keyword in value.split(): if keyword not in self.allowed_css_keywords and \ not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): # noqa break else: clean.append(prop + ': ' + value + ';') elif prop.lower() in self.allowed_svg_properties: clean.append(prop + ': ' + value + ';') return ' '.join(clean) PK.e[Y\(html5lib/filters/inject_meta_charset.pyonu[ abc@`sIddlmZmZmZddlmZdejfdYZdS(i(tabsolute_importtdivisiontunicode_literalsi(tbasetFiltercB`seZdZdZRS(cC`s tjj||||_dS(N(RRt__init__tencoding(tselftsourceR((sT/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.pyRsc c`sd}|jdk}g}xvtjj|D]b}|d}|dkrl|djdkrod}qon|dkr|djdkr^t}xE|d jD]~\\}}} |dk rqq|jd kr|j|d ||fsPK.e[Ubrr+html5lib/filters/alphabeticalattributes.pyonu[ abc@`sddlmZmZmZddlmZyddlmZWn!ek rcddl mZnXdej fdYZ dS(i(tabsolute_importtdivisiontunicode_literalsi(tbase(t OrderedDicttFiltercB`seZdZRS(cc`sxtjj|D]k}|ddkryt}x7t|djddD]\}}|||t(uStartTaguEmptyTag(RRt__iter__Rtsortedtitems(tselfttokentattrstnametvalue((sW/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.pyR s  (t__name__t __module__R (((sW/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.pyR sN( t __future__RRRR Rt collectionsRt ImportErrort ordereddictR(((sW/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.pyts  PK.e[i[$RRhtml5lib/filters/base.pyonu[ abc@`s6ddlmZmZmZdefdYZdS(i(tabsolute_importtdivisiontunicode_literalstFiltercB`s#eZdZdZdZRS(cC`s ||_dS(N(tsource(tselfR((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.pyt__init__scC`s 
t|jS(N(titerR(R((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.pyt__iter__scC`st|j|S(N(tgetattrR(Rtname((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.pyt __getattr__ s(t__name__t __module__RRR (((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.pyRs  N(t __future__RRRtobjectR(((sE/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.pytsPK.e[2= 2d2dhtml5lib/filters/sanitizer.pyonu[ abcE@`s2ddlmZmZmZddlZddlmZmZddlm Z ddl m Z ddl mZmZd gZeed d fed d fed d fed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed d fed d!fed d"fed d#fed d$fed d%fed d&fed d'fed d(fed d)fed d*fed d+fed d,fed d-fed d.fed d/fed d0fed d1fed d2fed d3fed d4fed d5fed d6fed d7fed d8fed d9fed d:fed d;fed d<fed d=fed d>fed d?fed d@fed dAfed dBfed dCfed dDfed dEfed dFfed dGfed dHfed dIfed dJfed dKfed dLfed dMfed dNfed dOfed dPfed dQfed dRfed dSfed dTfed dUfed dVfed dWfed dXfed dYfed dZfed d[fed d\fed d]fed d^fed d_fed d`fed dafed dbfed dcfed ddfed defed dffed dgfed dhfed difed djfed dkfed dlfed dmfedndofedndpfedndqfedndrfedndsfedndtfedndufedndvfedndwfedndxfedndyfedndzfednd{fednd|fednd}fednd~fedndfedndfedndfedndfedndfedndfedndfedndfedndfedndfedndfedd feddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddffZed4d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDdEdFdGdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcdddedfdgdhdidjdkdldmdndodpdqdrdsdtdudvdwdxdydzd{d|d}d~ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddded1dfddddddddddddddddddddddddddddddddddddddddedOdfedOdPfedOd%fdddddddddddddddddddddddddddddddddd d d d d ddddddddddddddddddd d!d"d#d$d%d&d'd(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDdEdFdGdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcddedOdfedOdfedOdfedOdfedOdPfedOdfedOd%fed1dfed1dfed1dfdedfdgdhfCZedidjdkdldmdndodpdqdrdsedOdfed1dff Zedtdudvdwdxdydzd{d|d}d~f 
ZedddddddddddddddfZedZedZedZedZedZejd1ejZd2e j fd3YZ dS(i(tabsolute_importtdivisiontunicode_literalsN(tescapetunescape(t urllib_parsei(tbasei(t namespacestprefixesuFilteruhtmluauabbruacronymuaddressuareauarticleuasideuaudioububigu blockquoteubrubuttonucanvasucaptionucenteruciteucodeucolucolgroupucommandudatagridudatalistuddudeludetailsudfnudialogudirudivudludtuemu event-sourceufieldsetu figcaptionufigureufooterufontuformuheaderuh1uh2uh3uh4uh5uh6uhruiuimguinputuinsukeygenukbdulabelulegenduliumumapumenuumeterumulticolunavunextiduoluoutputuoptgroupuoptionupupreuprogressuqususampusectionuselectusmallusoundusourceuspaceruspanustrikeustrongusubusuputableutbodyutdutextareautimeutfootuthutheadutruttuuuuluvaruvideoumathmlumactionumathumerrorumfracumiu mmultiscriptsumnumoumoverumpaddedumphantomu mprescriptsumrootumrowumspaceumsqrtumstyleumsubumsubsupumsupumtableumtdumtextumtrumunderu munderoverunoneusvguanimateu animateColoru animateMotionuanimateTransformuclipPathucircleudefsudescuellipseu font-faceufont-face-nameu font-face-srcuguglyphuhkernulinearGradientulineumarkerumetadatau missing-glyphumpathupathupolygonupolylineuradialGradienturectusetustopuswitchutextutitleutspanuuseuacceptuaccept-charsetu accesskeyuactionualignualtu autocompleteu autofocusuaxisu backgroundubalanceubgcoloru bgpropertiesuborderu bordercolorubordercolordarkubordercolorlightu bottompaddingu cellpaddingu cellspacinguchu challengeucharucharoffuchoffucharsetucheckeduclassuclearucolorucolsucolspanucompactucontenteditableucontrolsucoordsudataudatafldu datapagesizeudatasrcudatetimeudefaultudelayudisabledu draggableudynsrcuenctypeuendufaceuforuframeu galleryimgugutteruheadersuheightu hidefocusuhiddenuhighuhrefuhreflanguhspaceuiconuidu inputmodeuismapukeytypeu leftspacingulangulistulongdesculoopu loopcountuloopendu loopstartulowulowsrcumaxu maxlengthumediaumethoduminumultipleunameunohrefunoshadeunowrapuopenuoptimumupatternupingu point-sizeuposterupqgupreloadupromptu radiogroupureadonlyurelu repeat-maxu 
repeat-minureplaceurequiredurevu rightspacingurowsurowspanurulesuscopeuselectedushapeusizeusrcustartustepustyleusummaryusuppressutabindexutargetutemplateu toppaddingutypeu unselectableuusemapuurnuvalignuvalueuvariableuvolumeuvspaceuvrmluwidthuwrapuxmlu actiontypeu columnalignu columnlinesu columnspacingu columnspanudepthudisplayu displaystyleu equalcolumnsu equalrowsufenceu fontstyleu fontweightu linethicknessulspaceumathbackgroundu mathcoloru mathvariantumaxsizeuminsizeuotherurowalignurowlinesu rowspacingurspaceu scriptlevelu selectionu separatorustretchyuxlinkushowu accent-heightu accumulateuadditiveu alphabeticu arabic-formuascentu attributeNameu attributeTypeu baseProfileubboxubeginubyucalcModeu cap-heightu clip-pathucolor-renderingucontentucxucyududxudyudescentudurufillu fill-opacityu fill-ruleu font-familyu font-sizeu font-stretchu font-styleu font-variantu font-weightufromufxufyug1ug2u glyph-nameu gradientUnitsuhangingu horiz-adv-xuhoriz-origin-xu ideographicuku keyPointsu keySplinesukeyTimesu marker-endu marker-midu marker-startu markerHeightu markerUnitsu markerWidthu mathematicaluoffsetuopacityuorientuoriginuoverline-positionuoverline-thicknessupanose-1u pathLengthupointsupreserveAspectRatioururefXurefYu repeatCountu repeatDururequiredExtensionsurequiredFeaturesurestarturotateurxuryuslopeustemhustemvu stop-coloru stop-opacityustrikethrough-positionustrikethrough-thicknessustrokeustroke-dasharrayustroke-dashoffsetustroke-linecapustroke-linejoinustroke-miterlimitustroke-opacityu stroke-widthusystemLanguageu text-anchorutou transformuu1uu2uunderline-positionuunderline-thicknessuunicodeu unicode-rangeu units-per-emuvaluesuversionuviewBoxu visibilityuwidthsuxux-heightux1ux2uactuateuarcroleuroleubaseuspaceuyuy1uy2u zoomAndPanu color-profileucursorufilterumaskualtGlyphufeImageutextpathutrefuazimuthubackground-coloruborder-bottom-coloruborder-collapseu border-coloruborder-left-coloruborder-right-coloruborder-top-coloru directionu elevationufloatuletter-spacingu 
line-heightuoverflowupauseu pause-afteru pause-beforeupitchu pitch-rangeurichnessuspeaku speak-headeru speak-numeraluspeak-punctuationu speech-rateustressu text-alignutext-decorationu text-indentu unicode-bidiuvertical-alignu voice-familyu white-spaceuautouaquaublackublockublueuboldubothubottomubrownucollapseudashedudottedufuchsiaugrayugreenu !importantuitaliculeftulimeumaroonumediumunavyunormaluoliveupointerupurpleuredurightusolidusilverutealutopu transparentu underlineuwhiteuyellowued2kuftpuhttpuhttpsuircumailtounewsugopherunntputelnetuwebcaluxmppucalltoufeeduaimursyncutagusshusftpurtspuafsu image/pngu image/jpegu image/gifu image/webpu image/bmpu text/plainuL ^ # Match a content type / (?P[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+) # Match any character set and encoding (?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?) |(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?) # Assume the rest is data ,.* $ tFilterc B`sbeZdZeeeeeee e e e d Z dZdZdZdZdZRS(uA sanitization of XHTML+MathML+SVG and of inline style attributes.c C`sttt|j|||_||_||_||_||_||_||_ | |_ | |_ | |_ dS(N( tsuperR t__init__tallowed_elementstallowed_attributestallowed_css_propertiestallowed_css_keywordstallowed_svg_propertiestallowed_protocolstallowed_content_typestattr_val_is_uritsvg_attr_val_allows_reftsvg_allow_local_href( tselftsourceR R RRRRRRRR((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyR s          cc`s>x7tjj|D]#}|j|}|r|VqqWdS(N(RR t__iter__tsanitize_token(Rttoken((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyRscC`s|d}|d kr|d}|d}||f|jksd|dkrqtd|f|jkrq|j|S|j|Sn|dkrn|SdS( NutypeuStartTaguEndTaguEmptyTagunameu namespaceuhtmluComment(uStartTaguEndTaguEmptyTag(R tNoneRt allowed_tokentdisallowed_token(RRt token_typetnamet namespace((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyRs       c C`s9d|kr5|d}t|j}x-||jD]}|d|=|j|q6Wx||j@D]}tjddt||j}|j dd}yt j |}Wnt k rd}||=nX|rf|j rf|j |jkr||=n|j 
dkr[tj|j}|s3||=qX|jd|jkrX||=qXq[qfqfWxC|jD]8}||kritjddt||||unameudatau %s="%s"u%s:%su<%s%s>uu<%s>u selfClosingiu/>u Characters(titemstappendRRRtjointget(RRRR1tnsRtv((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyR2s   #A$ cC`sctjdjd|}tjd|s1dStjd|sGdSg}xtjd|D]\}}|sxq`n|j|jkr|j|d|dq`|jd d jdkr!x|jD],}||j krtjd| rPqqW|j|d|dq`|j|j kr`|j|d|dq`q`Wdj |S(Nuurl\s*\(\s*[^\s)]+?\s*\)\s*u u@^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$uu ^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$u([-\w]+)\s*:\s*([^:;]*)u: u;u-iu backgrounduborderumarginupaddingu\^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$(u backgrounduborderumarginupadding( R$tcompileR%R,tfindallR&RR9tsplitRRR:(Rtstyletcleantproptvaluetkeyword((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyR0Fs*  (t__name__t __module__t__doc__R R RRRRRRRRR RRRRR0(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyR s    2 (Nuabbr(Nuaccept(Nuaccept-charset(Nu accesskey(Nuaction(Nualign(Nualt(Nu autocomplete(Nu autofocus(Nuaxis(Nu background(Nubalance(Nubgcolor(Nu bgproperties(Nuborder(Nu bordercolor(Nubordercolordark(Nubordercolorlight(Nu bottompadding(Nu cellpadding(Nu cellspacing(Nuch(Nu challenge(Nuchar(Nucharoff(Nuchoff(Nucharset(Nuchecked(Nucite(Nuclass(Nuclear(Nucolor(Nucols(Nucolspan(Nucompact(Nucontenteditable(Nucontrols(Nucoords(Nudata(Nudatafld(Nu datapagesize(Nudatasrc(Nudatetime(Nudefault(Nudelay(Nudir(Nudisabled(Nu draggable(Nudynsrc(Nuenctype(Nuend(Nuface(Nufor(Nuform(Nuframe(Nu galleryimg(Nugutter(Nuheaders(Nuheight(Nu hidefocus(Nuhidden(Nuhigh(Nuhref(Nuhreflang(Nuhspace(Nuicon(Nuid(Nu inputmode(Nuismap(Nukeytype(Nulabel(Nu leftspacing(Nulang(Nulist(Nulongdesc(Nuloop(Nu loopcount(Nuloopend(Nu loopstart(Nulow(Nulowsrc(Numax(Nu maxlength(Numedia(Numethod(Numin(Numultiple(Nuname(Nunohref(Nunoshade(Nunowrap(Nuopen(Nuoptimum(Nupattern(Nuping(Nu point-size(Nuposter(Nupqg(Nupreload(Nuprompt(Nu 
radiogroup(Nureadonly(Nurel(Nu repeat-max(Nu repeat-min(Nureplace(Nurequired(Nurev(Nu rightspacing(Nurows(Nurowspan(Nurules(Nuscope(Nuselected(Nushape(Nusize(Nuspan(Nusrc(Nustart(Nustep(Nustyle(Nusummary(Nusuppress(Nutabindex(Nutarget(Nutemplate(Nutitle(Nu toppadding(Nutype(Nu unselectable(Nuusemap(Nuurn(Nuvalign(Nuvalue(Nuvariable(Nuvolume(Nuvspace(Nuvrml(Nuwidth(Nuwrap(Nu actiontype(Nualign(Nu columnalign(Nu columnalign(Nu columnalign(Nu columnlines(Nu columnspacing(Nu columnspan(Nudepth(Nudisplay(Nu displaystyle(Nu equalcolumns(Nu equalrows(Nufence(Nu fontstyle(Nu fontweight(Nuframe(Nuheight(Nu linethickness(Nulspace(Numathbackground(Nu mathcolor(Nu mathvariant(Nu mathvariant(Numaxsize(Numinsize(Nuother(Nurowalign(Nurowalign(Nurowalign(Nurowlines(Nu rowspacing(Nurowspan(Nurspace(Nu scriptlevel(Nu selection(Nu separator(Nustretchy(Nuwidth(Nuwidth(Nu accent-height(Nu accumulate(Nuadditive(Nu alphabetic(Nu arabic-form(Nuascent(Nu attributeName(Nu attributeType(Nu baseProfile(Nubbox(Nubegin(Nuby(NucalcMode(Nu cap-height(Nuclass(Nu clip-path(Nucolor(Nucolor-rendering(Nucontent(Nucx(Nucy(Nud(Nudx(Nudy(Nudescent(Nudisplay(Nudur(Nuend(Nufill(Nu fill-opacity(Nu fill-rule(Nu font-family(Nu font-size(Nu font-stretch(Nu font-style(Nu font-variant(Nu font-weight(Nufrom(Nufx(Nufy(Nug1(Nug2(Nu glyph-name(Nu gradientUnits(Nuhanging(Nuheight(Nu horiz-adv-x(Nuhoriz-origin-x(Nuid(Nu ideographic(Nuk(Nu keyPoints(Nu keySplines(NukeyTimes(Nulang(Nu marker-end(Nu marker-mid(Nu marker-start(Nu markerHeight(Nu markerUnits(Nu markerWidth(Nu mathematical(Numax(Numin(Nuname(Nuoffset(Nuopacity(Nuorient(Nuorigin(Nuoverline-position(Nuoverline-thickness(Nupanose-1(Nupath(Nu pathLength(Nupoints(NupreserveAspectRatio(Nur(NurefX(NurefY(Nu repeatCount(Nu repeatDur(NurequiredExtensions(NurequiredFeatures(Nurestart(Nurotate(Nurx(Nury(Nuslope(Nustemh(Nustemv(Nu stop-color(Nu 
stop-opacity(Nustrikethrough-position(Nustrikethrough-thickness(Nustroke(Nustroke-dasharray(Nustroke-dashoffset(Nustroke-linecap(Nustroke-linejoin(Nustroke-miterlimit(Nustroke-opacity(Nu stroke-width(NusystemLanguage(Nutarget(Nu text-anchor(Nuto(Nu transform(Nutype(Nuu1(Nuu2(Nuunderline-position(Nuunderline-thickness(Nuunicode(Nu unicode-range(Nu units-per-em(Nuvalues(Nuversion(NuviewBox(Nu visibility(Nuwidth(Nuwidths(Nux(Nux-height(Nux1(Nux2(Nuy(Nuy1(Nuy2(Nu zoomAndPan(Nuhref(Nusrc(Nucite(Nuaction(Nulongdesc(Nuposter(Nu background(Nudatasrc(Nudynsrc(Nulowsrc(Nuping(Nu clip-path(Nu color-profile(Nucursor(Nufill(Nufilter(Numarker(Nu marker-start(Nu marker-mid(Nu marker-end(Numask(Nustroke(NualtGlyph(Nuanimate(Nu animateColor(Nu animateMotion(NuanimateTransform(Nucursor(NufeImage(Nufilter(NulinearGradient(Nupattern(NuradialGradient(Nutextpath(Nutref(Nuset(Nuuse(.uazimuthubackground-coloruborder-bottom-coloruborder-collapseu border-coloruborder-left-coloruborder-right-coloruborder-top-coloruclearucolorucursoru directionudisplayu elevationufloatufontu font-familyu font-sizeu font-styleu font-variantu font-weightuheightuletter-spacingu line-heightuoverflowupauseu pause-afteru pause-beforeupitchu pitch-rangeurichnessuspeaku speak-headeru speak-numeraluspeak-punctuationu speech-rateustressu text-alignutext-decorationu text-indentu unicode-bidiuvertical-alignu voice-familyuvolumeu white-spaceuwidth('uautouaquaublackublockublueuboldubothubottomubrownucenterucollapseudashedudottedufuchsiaugrayugreenu !importantuitaliculeftulimeumaroonumediumunoneunavyunormalunowrapuoliveupointerupurpleuredurightusolidusilverutealutopu transparentu underlineuwhiteuyellow(ufillu fill-opacityu fill-ruleustrokeu stroke-widthustroke-linecapustroke-linejoinustroke-opacity(ued2kuftpuhttpuhttpsuircumailtounewsugopherunntputelnetuwebcaluxmppucalltoufeeduurnuaimursyncutagusshusftpurtspuafsudata(u image/pngu image/jpegu image/gifu image/webpu image/bmpu text/plain(!t 
__future__RRRR$txml.sax.saxutilsRRtpip._vendor.six.movesRR(tRt constantsRRt__all__t frozensetR RR RRRRRRRRR>tVERBOSER+R (((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyts2                                                                                                                                                                                           PK.e[mhtml5lib/filters/__init__.pyonu[ abc@sdS(N((((sI/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/__init__.pyttPK.e[!< 'html5lib/filters/inject_meta_charset.pynu[from __future__ import absolute_import, division, unicode_literals from . import base class Filter(base.Filter): def __init__(self, source, encoding): base.Filter.__init__(self, source) self.encoding = encoding def __iter__(self): state = "pre_head" meta_found = (self.encoding is None) pending = [] for token in base.Filter.__iter__(self): type = token["type"] if type == "StartTag": if token["name"].lower() == "head": state = "in_head" elif type == "EmptyTag": if token["name"].lower() == "meta": # replace charset with actual encoding has_http_equiv_content_type = False for (namespace, name), value in token["data"].items(): if namespace is not None: continue elif name.lower() == 'charset': token["data"][(namespace, name)] = self.encoding meta_found = True break elif name == 'http-equiv' and value.lower() == 'content-type': has_http_equiv_content_type = True else: if has_http_equiv_content_type and (None, "content") in token["data"]: token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding meta_found = True elif token["name"].lower() == "head" and not meta_found: # insert meta into empty head yield {"type": "StartTag", "name": "head", "data": token["data"]} yield {"type": "EmptyTag", "name": "meta", "data": {(None, "charset"): self.encoding}} yield {"type": "EndTag", "name": "head"} meta_found = True continue elif type == "EndTag": if token["name"].lower() == "head" and pending: # insert meta 
into head (if necessary) and flush pending queue yield pending.pop(0) if not meta_found: yield {"type": "EmptyTag", "name": "meta", "data": {(None, "charset"): self.encoding}} while pending: yield pending.pop(0) meta_found = True state = "post_head" if state == "in_head": pending.append(token) else: yield token PK.e[]ddhtml5lib/filters/sanitizer.pycnu[ abcE@`s2ddlmZmZmZddlZddlmZmZddlm Z ddl m Z ddl mZmZd gZeed d fed d fed d fed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed dfed d fed d!fed d"fed d#fed d$fed d%fed d&fed d'fed d(fed d)fed d*fed d+fed d,fed d-fed d.fed d/fed d0fed d1fed d2fed d3fed d4fed d5fed d6fed d7fed d8fed d9fed d:fed d;fed d<fed d=fed d>fed d?fed d@fed dAfed dBfed dCfed dDfed dEfed dFfed dGfed dHfed dIfed dJfed dKfed dLfed dMfed dNfed dOfed dPfed dQfed dRfed dSfed dTfed dUfed dVfed dWfed dXfed dYfed dZfed d[fed d\fed d]fed d^fed d_fed d`fed dafed dbfed dcfed ddfed defed dffed dgfed dhfed difed djfed dkfed dlfed dmfedndofedndpfedndqfedndrfedndsfedndtfedndufedndvfedndwfedndxfedndyfedndzfednd{fednd|fednd}fednd~fedndfedndfedndfedndfedndfedndfedndfedndfedndfedndfedndfedd feddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddffZed4d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDdEdFdGdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcdddedfdgdhdidjdkdldmdndodpdqdrdsdtdudvdwdxdydzd{d|d}d~ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddded1dfddddddddddddddddddddddddddddddddddddddddedOdfedOdPfedOd%fdddddddddddddddddddddddddddddddddd d d d d ddddddddddddddddddd d!d"d#d$d%d&d'd(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDdEdFdGdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcddedOdfedOdfedOdfedOdfedOdPfedOdfedOd%fed1dfed1dfed1dfdedfdgdhfCZedidjdkdldmdndodpdqdrdsedOdfed1dff Zedtdudvdwdxdydzd{d|d}d~f ZedddddddddddddddfZedZedZedZedZedZejd1ejZd2e j fd3YZ 
dS(i(tabsolute_importtdivisiontunicode_literalsN(tescapetunescape(t urllib_parsei(tbasei(t namespacestprefixesuFilteruhtmluauabbruacronymuaddressuareauarticleuasideuaudioububigu blockquoteubrubuttonucanvasucaptionucenteruciteucodeucolucolgroupucommandudatagridudatalistuddudeludetailsudfnudialogudirudivudludtuemu event-sourceufieldsetu figcaptionufigureufooterufontuformuheaderuh1uh2uh3uh4uh5uh6uhruiuimguinputuinsukeygenukbdulabelulegenduliumumapumenuumeterumulticolunavunextiduoluoutputuoptgroupuoptionupupreuprogressuqususampusectionuselectusmallusoundusourceuspaceruspanustrikeustrongusubusuputableutbodyutdutextareautimeutfootuthutheadutruttuuuuluvaruvideoumathmlumactionumathumerrorumfracumiu mmultiscriptsumnumoumoverumpaddedumphantomu mprescriptsumrootumrowumspaceumsqrtumstyleumsubumsubsupumsupumtableumtdumtextumtrumunderu munderoverunoneusvguanimateu animateColoru animateMotionuanimateTransformuclipPathucircleudefsudescuellipseu font-faceufont-face-nameu font-face-srcuguglyphuhkernulinearGradientulineumarkerumetadatau missing-glyphumpathupathupolygonupolylineuradialGradienturectusetustopuswitchutextutitleutspanuuseuacceptuaccept-charsetu accesskeyuactionualignualtu autocompleteu autofocusuaxisu backgroundubalanceubgcoloru bgpropertiesuborderu bordercolorubordercolordarkubordercolorlightu bottompaddingu cellpaddingu cellspacinguchu challengeucharucharoffuchoffucharsetucheckeduclassuclearucolorucolsucolspanucompactucontenteditableucontrolsucoordsudataudatafldu datapagesizeudatasrcudatetimeudefaultudelayudisabledu draggableudynsrcuenctypeuendufaceuforuframeu galleryimgugutteruheadersuheightu hidefocusuhiddenuhighuhrefuhreflanguhspaceuiconuidu inputmodeuismapukeytypeu leftspacingulangulistulongdesculoopu loopcountuloopendu loopstartulowulowsrcumaxu maxlengthumediaumethoduminumultipleunameunohrefunoshadeunowrapuopenuoptimumupatternupingu point-sizeuposterupqgupreloadupromptu radiogroupureadonlyurelu repeat-maxu repeat-minureplaceurequiredurevu 
rightspacingurowsurowspanurulesuscopeuselectedushapeusizeusrcustartustepustyleusummaryusuppressutabindexutargetutemplateu toppaddingutypeu unselectableuusemapuurnuvalignuvalueuvariableuvolumeuvspaceuvrmluwidthuwrapuxmlu actiontypeu columnalignu columnlinesu columnspacingu columnspanudepthudisplayu displaystyleu equalcolumnsu equalrowsufenceu fontstyleu fontweightu linethicknessulspaceumathbackgroundu mathcoloru mathvariantumaxsizeuminsizeuotherurowalignurowlinesu rowspacingurspaceu scriptlevelu selectionu separatorustretchyuxlinkushowu accent-heightu accumulateuadditiveu alphabeticu arabic-formuascentu attributeNameu attributeTypeu baseProfileubboxubeginubyucalcModeu cap-heightu clip-pathucolor-renderingucontentucxucyududxudyudescentudurufillu fill-opacityu fill-ruleu font-familyu font-sizeu font-stretchu font-styleu font-variantu font-weightufromufxufyug1ug2u glyph-nameu gradientUnitsuhangingu horiz-adv-xuhoriz-origin-xu ideographicuku keyPointsu keySplinesukeyTimesu marker-endu marker-midu marker-startu markerHeightu markerUnitsu markerWidthu mathematicaluoffsetuopacityuorientuoriginuoverline-positionuoverline-thicknessupanose-1u pathLengthupointsupreserveAspectRatioururefXurefYu repeatCountu repeatDururequiredExtensionsurequiredFeaturesurestarturotateurxuryuslopeustemhustemvu stop-coloru stop-opacityustrikethrough-positionustrikethrough-thicknessustrokeustroke-dasharrayustroke-dashoffsetustroke-linecapustroke-linejoinustroke-miterlimitustroke-opacityu stroke-widthusystemLanguageu text-anchorutou transformuu1uu2uunderline-positionuunderline-thicknessuunicodeu unicode-rangeu units-per-emuvaluesuversionuviewBoxu visibilityuwidthsuxux-heightux1ux2uactuateuarcroleuroleubaseuspaceuyuy1uy2u zoomAndPanu color-profileucursorufilterumaskualtGlyphufeImageutextpathutrefuazimuthubackground-coloruborder-bottom-coloruborder-collapseu border-coloruborder-left-coloruborder-right-coloruborder-top-coloru directionu elevationufloatuletter-spacingu line-heightuoverflowupauseu 
pause-afteru pause-beforeupitchu pitch-rangeurichnessuspeaku speak-headeru speak-numeraluspeak-punctuationu speech-rateustressu text-alignutext-decorationu text-indentu unicode-bidiuvertical-alignu voice-familyu white-spaceuautouaquaublackublockublueuboldubothubottomubrownucollapseudashedudottedufuchsiaugrayugreenu !importantuitaliculeftulimeumaroonumediumunavyunormaluoliveupointerupurpleuredurightusolidusilverutealutopu transparentu underlineuwhiteuyellowued2kuftpuhttpuhttpsuircumailtounewsugopherunntputelnetuwebcaluxmppucalltoufeeduaimursyncutagusshusftpurtspuafsu image/pngu image/jpegu image/gifu image/webpu image/bmpu text/plainuL ^ # Match a content type / (?P[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+) # Match any character set and encoding (?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?) |(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?) # Assume the rest is data ,.* $ tFilterc B`sbeZdZeeeeeee e e e d Z dZdZdZdZdZRS(uA sanitization of XHTML+MathML+SVG and of inline style attributes.c C`sttt|j|||_||_||_||_||_||_||_ | |_ | |_ | |_ dS(N( tsuperR t__init__tallowed_elementstallowed_attributestallowed_css_propertiestallowed_css_keywordstallowed_svg_propertiestallowed_protocolstallowed_content_typestattr_val_is_uritsvg_attr_val_allows_reftsvg_allow_local_href( tselftsourceR R RRRRRRRR((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyR s          cc`s>x7tjj|D]#}|j|}|r|VqqWdS(N(RR t__iter__tsanitize_token(Rttoken((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyRscC`s|d}|d kr|d}|d}||f|jksd|dkrqtd|f|jkrq|j|S|j|Sn|dkrn|SdS( NutypeuStartTaguEndTaguEmptyTagunameu namespaceuhtmluComment(uStartTaguEndTaguEmptyTag(R tNoneRt allowed_tokentdisallowed_token(RRt token_typetnamet namespace((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyRs       c C`sKd|krG|d}t|j}x-||jD]}|d|=|j|q6Wx||j@D]}||ks~ttjddt||j }|j dd}yt j |}Wnt k 
rd}||=nX|rf|jrf|j|jkr||=n|jdkrmtj|j}|sE||=qj|jd|jkrj||=qjqmqfqfWxC|jD]8}||kr{tjddt||||unameudatauStartTaguEmptyTagu %s="%s"u%s:%su<%s%s>uu<%s>u selfClosingiu/>u Characters(uStartTaguEmptyTag(R$titemstappendRRRtjointget(RRRR2tnsRtv((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyR2s   #A$ cC`sctjdjd|}tjd|s1dStjd|sGdSg}xtjd|D]\}}|sxq`n|j|jkr|j|d|dq`|jd d jdkr!x|jD],}||j krtjd| rPqqW|j|d|dq`|j|j kr`|j|d|dq`q`Wdj |S(Nuurl\s*\(\s*[^\s)]+?\s*\)\s*u u@^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$uu ^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$u([-\w]+)\s*:\s*([^:;]*)u: u;u-iu backgrounduborderumarginupaddingu\^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$(u backgrounduborderumarginupadding( R%tcompileR&R-tfindallR'RR:tsplitRRR;(Rtstyletcleantproptvaluetkeyword((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyR1Fs*  (t__name__t __module__t__doc__R R RRRRRRRRR RRRRR1(((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyR s    2 (Nuabbr(Nuaccept(Nuaccept-charset(Nu accesskey(Nuaction(Nualign(Nualt(Nu autocomplete(Nu autofocus(Nuaxis(Nu background(Nubalance(Nubgcolor(Nu bgproperties(Nuborder(Nu bordercolor(Nubordercolordark(Nubordercolorlight(Nu bottompadding(Nu cellpadding(Nu cellspacing(Nuch(Nu challenge(Nuchar(Nucharoff(Nuchoff(Nucharset(Nuchecked(Nucite(Nuclass(Nuclear(Nucolor(Nucols(Nucolspan(Nucompact(Nucontenteditable(Nucontrols(Nucoords(Nudata(Nudatafld(Nu datapagesize(Nudatasrc(Nudatetime(Nudefault(Nudelay(Nudir(Nudisabled(Nu draggable(Nudynsrc(Nuenctype(Nuend(Nuface(Nufor(Nuform(Nuframe(Nu galleryimg(Nugutter(Nuheaders(Nuheight(Nu hidefocus(Nuhidden(Nuhigh(Nuhref(Nuhreflang(Nuhspace(Nuicon(Nuid(Nu inputmode(Nuismap(Nukeytype(Nulabel(Nu leftspacing(Nulang(Nulist(Nulongdesc(Nuloop(Nu loopcount(Nuloopend(Nu loopstart(Nulow(Nulowsrc(Numax(Nu 
maxlength(Numedia(Numethod(Numin(Numultiple(Nuname(Nunohref(Nunoshade(Nunowrap(Nuopen(Nuoptimum(Nupattern(Nuping(Nu point-size(Nuposter(Nupqg(Nupreload(Nuprompt(Nu radiogroup(Nureadonly(Nurel(Nu repeat-max(Nu repeat-min(Nureplace(Nurequired(Nurev(Nu rightspacing(Nurows(Nurowspan(Nurules(Nuscope(Nuselected(Nushape(Nusize(Nuspan(Nusrc(Nustart(Nustep(Nustyle(Nusummary(Nusuppress(Nutabindex(Nutarget(Nutemplate(Nutitle(Nu toppadding(Nutype(Nu unselectable(Nuusemap(Nuurn(Nuvalign(Nuvalue(Nuvariable(Nuvolume(Nuvspace(Nuvrml(Nuwidth(Nuwrap(Nu actiontype(Nualign(Nu columnalign(Nu columnalign(Nu columnalign(Nu columnlines(Nu columnspacing(Nu columnspan(Nudepth(Nudisplay(Nu displaystyle(Nu equalcolumns(Nu equalrows(Nufence(Nu fontstyle(Nu fontweight(Nuframe(Nuheight(Nu linethickness(Nulspace(Numathbackground(Nu mathcolor(Nu mathvariant(Nu mathvariant(Numaxsize(Numinsize(Nuother(Nurowalign(Nurowalign(Nurowalign(Nurowlines(Nu rowspacing(Nurowspan(Nurspace(Nu scriptlevel(Nu selection(Nu separator(Nustretchy(Nuwidth(Nuwidth(Nu accent-height(Nu accumulate(Nuadditive(Nu alphabetic(Nu arabic-form(Nuascent(Nu attributeName(Nu attributeType(Nu baseProfile(Nubbox(Nubegin(Nuby(NucalcMode(Nu cap-height(Nuclass(Nu clip-path(Nucolor(Nucolor-rendering(Nucontent(Nucx(Nucy(Nud(Nudx(Nudy(Nudescent(Nudisplay(Nudur(Nuend(Nufill(Nu fill-opacity(Nu fill-rule(Nu font-family(Nu font-size(Nu font-stretch(Nu font-style(Nu font-variant(Nu font-weight(Nufrom(Nufx(Nufy(Nug1(Nug2(Nu glyph-name(Nu gradientUnits(Nuhanging(Nuheight(Nu horiz-adv-x(Nuhoriz-origin-x(Nuid(Nu ideographic(Nuk(Nu keyPoints(Nu keySplines(NukeyTimes(Nulang(Nu marker-end(Nu marker-mid(Nu marker-start(Nu markerHeight(Nu markerUnits(Nu markerWidth(Nu mathematical(Numax(Numin(Nuname(Nuoffset(Nuopacity(Nuorient(Nuorigin(Nuoverline-position(Nuoverline-thickness(Nupanose-1(Nupath(Nu pathLength(Nupoints(NupreserveAspectRatio(Nur(NurefX(NurefY(Nu repeatCount(Nu 
repeatDur(NurequiredExtensions(NurequiredFeatures(Nurestart(Nurotate(Nurx(Nury(Nuslope(Nustemh(Nustemv(Nu stop-color(Nu stop-opacity(Nustrikethrough-position(Nustrikethrough-thickness(Nustroke(Nustroke-dasharray(Nustroke-dashoffset(Nustroke-linecap(Nustroke-linejoin(Nustroke-miterlimit(Nustroke-opacity(Nu stroke-width(NusystemLanguage(Nutarget(Nu text-anchor(Nuto(Nu transform(Nutype(Nuu1(Nuu2(Nuunderline-position(Nuunderline-thickness(Nuunicode(Nu unicode-range(Nu units-per-em(Nuvalues(Nuversion(NuviewBox(Nu visibility(Nuwidth(Nuwidths(Nux(Nux-height(Nux1(Nux2(Nuy(Nuy1(Nuy2(Nu zoomAndPan(Nuhref(Nusrc(Nucite(Nuaction(Nulongdesc(Nuposter(Nu background(Nudatasrc(Nudynsrc(Nulowsrc(Nuping(Nu clip-path(Nu color-profile(Nucursor(Nufill(Nufilter(Numarker(Nu marker-start(Nu marker-mid(Nu marker-end(Numask(Nustroke(NualtGlyph(Nuanimate(Nu animateColor(Nu animateMotion(NuanimateTransform(Nucursor(NufeImage(Nufilter(NulinearGradient(Nupattern(NuradialGradient(Nutextpath(Nutref(Nuset(Nuuse(.uazimuthubackground-coloruborder-bottom-coloruborder-collapseu border-coloruborder-left-coloruborder-right-coloruborder-top-coloruclearucolorucursoru directionudisplayu elevationufloatufontu font-familyu font-sizeu font-styleu font-variantu font-weightuheightuletter-spacingu line-heightuoverflowupauseu pause-afteru pause-beforeupitchu pitch-rangeurichnessuspeaku speak-headeru speak-numeraluspeak-punctuationu speech-rateustressu text-alignutext-decorationu text-indentu unicode-bidiuvertical-alignu voice-familyuvolumeu white-spaceuwidth('uautouaquaublackublockublueuboldubothubottomubrownucenterucollapseudashedudottedufuchsiaugrayugreenu !importantuitaliculeftulimeumaroonumediumunoneunavyunormalunowrapuoliveupointerupurpleuredurightusolidusilverutealutopu transparentu underlineuwhiteuyellow(ufillu fill-opacityu fill-ruleustrokeu 
stroke-widthustroke-linecapustroke-linejoinustroke-opacity(ued2kuftpuhttpuhttpsuircumailtounewsugopherunntputelnetuwebcaluxmppucalltoufeeduurnuaimursyncutagusshusftpurtspuafsudata(u image/pngu image/jpegu image/gifu image/webpu image/bmpu text/plain(!t __future__RRRR%txml.sax.saxutilsRRtpip._vendor.six.movesRR)tRt constantsRRt__all__t frozensetR RR RRRRRRRRR?tVERBOSER,R (((sJ/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyts2                                                                                                                                                                                           PK.e[\44!html5lib/filters/optionaltags.pyonu[ abc@`sIddlmZmZmZddlmZdejfdYZdS(i(tabsolute_importtdivisiontunicode_literalsi(tbasetFiltercB`s,eZdZdZdZdZRS(cc`shd}}x:|jD]/}|dk r7|||fVn|}|}qW|dk rd||dfVndS(N(tNonetsource(tselft previous1t previous2ttoken((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pytsliders    cc`sx|jD]\}}}|d}|dkra|dsV|j|d|| r|Vqq |dkr|j|d|s|Vqq |Vq WdS(NutypeuStartTagudataunameuEndTag(R tis_optional_starttis_optional_end(RtpreviousR tnextttype((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pyt__iter__s      cC`s*|r|dpd}|dkr,|dkS|dkre|dkrHtS|dkr&|d dkSn|d kr|dkrtS|dkr|d dkStSn|d kr|dkr|d dkStSnW|dkr&|dkr|r|ddkr|d dkrtS|d dkStSntS(NutypeuhtmluCommentuSpaceCharactersuheaduStartTaguEmptyTaguEndTagunameubodyuscriptustyleucolgroupucolutbodyutheadutfootutr(uCommentuSpaceCharacters(uStartTaguEmptyTag(uCommentuSpaceCharacters(uscriptustyle(uStartTaguEmptyTag(utbodyutheadutfoot(RtTruetFalse(RttagnameRRR((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pyR s4            cC`s|r|dpd}|d5kr,|d6kS|d7krk|d krR|d |kS|d kpg|dkSn|d8kr|d kr|d d9kS|dkr|d kp|dkStSn|dkr|d:kr|d d;kS|d kp|dkSn|d,kr8|d kr|d d<kS|d kp4|dkSnI|d=krw|d kr^|d d>kS|d kps|dkSn |d/kr|d?krtS|d kr|d d/kStSn|d@kr|d kr|d dAkS|d1kr|d kp|dkStSn~|d2krB|d kr)|d d1kS|d kp>|dkSn?|dBkr|d 
krh|d dCkS|d kp}|dkSntS(DNutypeuhtmluheadubodyuCommentuSpaceCharactersuliuoptgrouputruStartTagunameuEndTagudtuddupuEmptyTaguaddressuarticleuasideu blockquoteudatagridudialogudirudivudlufieldsetufooteruformuh1uh2uh3uh4uh5uh6uheaderuhrumenuunavuolupreusectionutableuuluoptionurturpucolgrouputheadutbodyutfootutduth(uhtmluheadubody(uCommentuSpaceCharacters(uliuoptgrouputr(udtudd(udtudd(uStartTaguEmptyTag(uaddressuarticleuasideu blockquoteudatagridudialogudirudivudlufieldsetufooteruformuh1uh2uh3uh4uh5uh6uheaderuhrumenuunavuolupupreusectionutableuul(uoptionuoptgroup(urturp(urturp(uCommentuSpaceCharacters(utheadutbody(utbodyutfoot(utduth(utduth(RRR(RRRR((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pyR Wsf                     (t__name__t __module__R RR R (((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pyRs 9N(t __future__RRRtRR(((sM/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pytsPK.e[Y\(html5lib/filters/inject_meta_charset.pycnu[ abc@`sIddlmZmZmZddlmZdejfdYZdS(i(tabsolute_importtdivisiontunicode_literalsi(tbasetFiltercB`seZdZdZRS(cC`s tjj||||_dS(N(RRt__init__tencoding(tselftsourceR((sT/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.pyRsc c`sd}|jdk}g}xvtjj|D]b}|d}|dkrl|djdkrod}qon|dkr|djdkr^t}xE|d jD]~\\}}} |dk rqq|jd kr|j|d ||fsPK.e[html5lib/filters/__init__.pynu[PK.e[{hDDhtml5lib/constants.pycnu[ abcP@`sNddlmZmZmZddlZdZidd6dd6dd6d d 6d d 6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d!d"6d#d$6d%d&6d'd(6d)d*6d+d,6d-d.6d/d06d1d26d3d46d5d66d7d86d9d:6d;d<6d=d>6d?d@6dAdB6dCdD6dEdF6dGdH6dIdJ6dKdL6dMdN6dOdP6dQdR6dSdT6dUdV6dWdX6dYdZ6d[d\6dUd]6dUd^6d_d`6dadb6dcdd6dedf6dgdh6didj6dkdl6dmdn6dodp6dqdr6dsdt6dudv6dwdx6dydz6d{d|6d}d~6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6Zidd6dd6d d 6d d 6d d6dd6Ze 
eddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfeddfed dfed dfed d fgZ e edd!fedd"fedd#fedd$fedd%fedd&fedd'fedd(fedd)fedd*fedd+fedd,fedd-fedd.fgZ e edd/feddfedd0fedd1fedd2fedd3fedd4fedd5fedd6fedd7fedd8fedd9feddfedd:fedd;fedd<fedd=fedd>fedd?fedd@feddAfeddBfeddCfeddDfeddEfeddFfeddGfeddHfeddIfeddJfeddKfeddLfeddMfeddNfeddOfeddPfeddQfeddRfeddSfeddfeddTfeddUfeddVfeddWfeddXfeddYfeddZfedd[feddfedd\fedd]fedd^fedd_fedd`feddafeddfeddbfeddcfedddfeddefeddffeddgfeddhfeddifeddjfeddfeddkfeddfeddlfeddmfeddfeddnfedd feddofeddpfeddqfeddrfed dfgNZ e eddsfed dfed dfed d fgZ e eddfeddfeddfeddfeddfgZi>dtdu6dvdw6dxdy6dzd{6d|d}6d~d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6Zidd6Zi d ded fd6d ded fd6d ded fd6d ded fd6d ded fd6d d ed fd6d ded fd6dd3edfd6ddedfd6ddedfd6ddedfd6dd edfd6ZegejD]'\Z\ZZZeefef^q Ze ddddd gZe ddkdmdndogZe ejZe ejZe ejZ e ej!Z!e ej"Z#egejD]$Z$e%e$e%e$j&f^q Z'dZ(e d3d=d dZd]dSd8dVdDddd0d;dWd d gZ)e d dlgZ*e djdgdrdTd_d`dagZ+ie d gd6e dgdj6e dgdV6e ddgd6e ddgd6e ddgdg6e dgd?6e ddgd6e ddddgd=6e dgdS6e dgd\6e dd gdE6e dd d!gd"6e dd gd#6e dd$gd96e dd d%d$ddgdW6e dd d$dgdi6e dd gd&6Z,dZ-e dCdDdEdFdGgZ.idHdI6dHdJ6dKdL6dKdM6dNdO6dNdP6dQdR6dSdT6dSdU6dVdW6dXdY6dZd[6dZd\6d]d^6d_d`6dadb6dcdd6dedf6dgdh6didj6didk6dldm6dndo6dpdq6dpdr6dsdt6dsdu6dvdw6dxdy6dzd{6d|d}6d~d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d d 6d d 6dd6dd6dd6dd6dd6dd6dd6dd6dd6d 
d!6d"d#6d$d%6d&d'6d(d)6d*d+6d,d-6d.d/6d0d16d2d36dd46d5d66d7d86d9d:6d;d<6d;d=6d>d?6d>d@6dAdB6dCdD6dCdE6dFdG6dHdI6dJdK6dLdM6dLdN6dOdP6dQdR6dSdT6dUdV6dWdX6dYdZ6d[d\6d]d^6d_d`6dadb6dcdd6dedf6dgdh6didj6didk6dldm6dndo6dpdq6drds6dtdu6dvdw6dxdy6dzd{6d|d}6d|d~6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d d 6d d 6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d!d"6d#d$6d%d&6d'd(6d)d*6d+d,6d-d.6d/d06d1d26d3d46d5d66d7d86d9d:6d;d<6d=d>6d?d@6dAdB6dCdD6dEdF6dGdH6dIdJ6dKdL6dMdN6dOdP6dQdR6dSdT6dUdV6ddW6ddX6dYdZ6d[d\6d]d^6d_d`6dadb6dcdd6dedf6dgdh6didj6dkdl6dmdn6dodp6dqdr6d ds6d dt6ddu6dvdw6dxdy6dzd{6dd|6d}d~6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d d 6d d 6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6dd!6d"d#6d"d$6d%d&6d'd(6d)d*6d+d,6d+d-6d.d/6d0d16d2d36d4d56d6d76d8d96d:d;6d<d=6d>d?6d>d@6dAdB6dAdC6dDdE6dFdG6dFdH6dIdJ6dKdL6dMdN6dOdP6dQdR6dSdT6dUdV6dWdX6dYdZ6d[d\6dd]6d^d_6d`da6dbdc6ddde6dfdg6dhdi6djdk6dldm6ddn6dodp6dqdr6dsdt6dudv6dudw6dxdy6dzd{6d|d}6d~d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d)d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6dd 6d d 6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 
6dd!6d"d#6d$d%6d&d'6dd(6d)d*6d+d,6d-d.6d/d06d1d26d3d46d5d66d7d86d9d:6d;d<6d=d>6d?d@6dAdB6dCdD6dEdF6dGdH6dIdJ6dKdL6dKdM6dNdO6dPdQ6dRdS6dTdU6dVdW6dVdX6dYdZ6d[d\6d]d^6d_d`6d_da6dbdc6ddde6dfdg6dhdi6djdk6dldm6dndo6dpdq6drds6ddt6dudv6dwdx6dydz6d{d|6d}d~6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dgd6dd6dd6dd 6d d 6d d 6d d6dd6dd6dKd6dKdE6dd6dd6dd6dd6dd6dd6d d!6dd"6d#d$6d%d&6d'd(6d)d*6d+d,6d-d.6d/d06d1d26d3d46d5d66d7d86d9d:6d;d<6did=6d>d?6d@dA6dBdC6dAdD6dEdF6dGdH6dIdJ6dKdL6dMdF6dAdN6dIdO6dPdQ6dPdR6dSdT6dUdV6dAdW6ddX6dYdZ6dYd[6d\d]6d\d^6dd_6d`da6dbdc6ddde6dfdg6dhdi6djdk6dldm6dndo6dpdq6dpdr6dhds6dtdu6dddv6dwdx6dydz6d~d{6d~d|6d}d~6dfd6dd6dd6dd6dd6dd6dd6dd6dld6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dvd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d}d6d}d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d d 6d d 6d d6dd6dd6dd6dd6dd6dhd6dd6dd6dd6dd6d d!6djd"6dld#6d$d%6d&d'6d(d)6d*d+6d*d,6dd-6d.d/6dd06dd16d2d36d4d56d6d76d8d96d:d;6d<d=6d>d?6d@dA6dBdC6ddD6dEdF6dGdH6dIdJ6dIdK6dLdM6dNdO6dPdQ6dRdS6ddT6ddU6dVdW6dXdY6dXdZ6dd[6d\d]6d^d_6d`da6d`db6dcdd6dedf6dgdh6didj6dkdl6dmdn6dodp6ddq6drds6dtdu6dvdw6dxdy6dkdz6d{d|6d}d~6dd6dd6dd6dd6dnd6dnd6dd6dd6dd6dd6dd6dd6d?d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d?d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d5d6dd 6dd 6dd 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dd 6dd 6d d 6d d! 6d" d# 6d$ d% 6dzd& 6dd' 6dd( 6d5d) 6dd* 6d~d+ 6d, d- 6d. d/ 6d0 d1 6d2 d3 6d4 d5 6d6 d7 6d8 d9 6d: d; 6dd< 6dd= 6dd> 6d? 
d@ 6dA dB 6dC dD 6ddE 6d dF 6dG dH 6dG dI 6dJ dK 6dL dM 6dN dO 6dP dQ 6dP dR 6dS dT 6dU dV 6dW dX 6dndY 6dZ d[ 6d\ d] 6d^ d_ 6d` da 6d` db 6dc dd 6de df 6dg dh 6di dj 6dk dl 6dm dn 6do dp 6dq dr 6ds dt 6ds du 6ds dv 6dw dx 6dy dz 6d{ d| 6d} d~ 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dN d 6dS d 6d_d 6dc d 6dm d 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6d d 6dd 6d_d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dld 6dcd 6dnd 6dZ d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dzd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dd 6dd 6dd 6dd 6dd 6dd 6d d 6d d 6d d 6d d 6d d 6d d! 6d" d# 6dd$ 6dd% 6d& d' 6d( d) 6dd* 6d+ d, 6d- d. 6d/ d0 6d1 d2 6d3 d4 6d3 d5 6d6 d7 6d6 d8 6d1 d9 6d: d; 6d< d= 6dd> 6d? d@ 6ddA 6dB dC 6dD dE 6ddF 6ddD6dG dH 6dI dJ 6dK dL 6dM dN 6dO dP 6d dQ 6dR dS 6dK dT 6ddU 6d dV 6ddW 6ddX 6dY dZ 6dY d[ 6dd\ 6dd] 6d d^ 6dd_ 6d` da 6d;db 6dc dd 6de df 6dg dh 6di dj 6dk dl 6dk dm 6dn do 6dp dq 6dr ds 6dt du 6dv dw 6dx dy 6dz d{ 6d| d} 6d~ d 6d d 6d d 6d d 6dg d 6d d 6d d 6dd 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6dd 6d d 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6dd 6dd 6dd 6d d 6d d 6d d 6dOd 6d d 6d d 6d d 6d d 6dd 6d d 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dOd 6d d 6d d 6d d 6d d 6dOd 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6did 6dd 6d d 6d d 6d[d 6d d 6d d 6d d 6d d 6dd 6d d 6d'd 6d d 6d'd 6d! d" 6d# d$ 6d# d% 6d)d& 6d+d' 6d( d) 6d* d+ 6d| d, 6d- d. 6d/ d0 6d1 d2 6d3 d4 6d5 d6 6d7 d8 6d9 d: 6d; d< 6d= d> 6d? d@ 6dA dB 6dC dD 6dE dF 6dG dH 6dI dJ 6dK dL 6dM dN 6d/dO 6dA dP 6dQ dR 6dS dT 6d6dU 6dydV 6dW dX 6dY dZ 6d[ d\ 6d] d^ 6d)d_ 6d3 d` 6d&da 6dSdb 6dc dd 6d;de 6d-df 6ddg 6de dh 6di dj 6dYdk 6d] dl 6d[dm 6dadn 6dado 6dp dq 6dr ds 6dt du 6dv dw 6dx dy 6dz d{ 6d! 
d| 6d} d~ 6dYd 6d d 6d]d 6dcd 6d d 6d9d 6d d 6d]d 6d d 6d&d 6dSd 6d d 6d d 6d d 6dd 6dc d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d1d 6dmd 6dod 6d d 6dqd 6d- d 6d d 6d d 6d d 6d d 6d d 6d d 6ddd 6d d 6d d 6dd 6d d 6d d 6d-d 6d, d 6dd 6d d 6d d 6d d 6d d 6d d 6d}d 6dcd 6d d 6d d 6dC d 6d8d 6d d 6d d 6dd 6ddC6d d 6d d 6d} d 6di d 6d d 6d d 6d d 6d d 6d d 6dId 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dd 6dd 6d2d 6dAd 6dd 6d d 6d d 6d d 6d d 6d#d 6d d 6d d 6d d 6d d 6dd 6dUd 6d d 6dd 6dd! 6d" d# 6dd$ 6d d% 6d& d' 6d( d) 6dn d* 6dd+ 6d, d- 6d. d/ 6dd0 6d1 d2 6dd3 6d4 d5 6d6 d7 6d6 d8 6d9 d: 6d; d< 6dd= 6d> d? 6d@ dA 6dB dC 6dD dE 6ddF 6dG dH 6dI dJ 6dK dL 6ddM 6dN dO 6dP dQ 6ddR 6dS dT 6dU dV 6dW dX 6ddY 6dZ d[ 6dZ d\ 6dd] 6dd^ 6dd_ 6dd` 6dda 6db dc 6dd de 6df dg 6ddh 6di dj 6dk dl 6dm dn 6do dp 6ddq 6dr ds 6dt du 6ddv 6ddw 6dx dy 6ddz 6d{ d| 6dd} 6dd~ 6dd 6d d 6dd 6dd 6dd 6dd 6dd 6dd 6dd 6dd 6dd 6d@ d 6d d 6d d 6dd 6d d 6d d 6dd 6d d 6d> d 6d d 6d d 6d d 6dd 6d d 6d d 6dd 6d d 6dd 6dd 6dd 6dd 6dd 6dd 6dd 6dd 6d d 6d d 6d d 6dd 6d d 6d d 6dd 6d d 6d d 6dd 6dd 6d d 6d d 6dd 6dd 6d d 6d d 6d d 6dd 6dd 6dd 6dd 6dd 6dG d 6d d 6d d 6d d 6d d 6dd 6dd 6dd 6dd 6dd 6d d 6dd 6dd 6d d 6dd 6dd 6dd 6dd 6dd 6dd 6d d 6d d 6dd 6dd 6dd 6d d 6dd 6dd 6d d 6d d 6d d 6dd 6d d 6d d 6dd 6d d 6d d 6dd 6dd 6dd 6dd 6dd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6d! d" 6d# d$ 6d% d& 6d' d( 6dd) 6dd* 6d+ d, 6drd- 6d. d/ 6d. d0 6dtd1 6dvd2 6d3 d4 6d3 d5 6d6 d7 6dxd8 6d9 d: 6d; d< 6dd= 6d> d? 
6d@ dA 6dB dC 6dD dE 6dF dG 6dH dI 6dH dJ 6dK dL 6dM dN 6d0dO 6ddP 6dmdQ 6dR dS 6dT dU 6dIdV 6dW dX 6dY dZ 6d[ d\ 6d] d^ 6d_ d` 6dda 6db dc 6dd de 6df dg 6ddh 6di dj 6dodk 6dl dm 6dn do 6dn dp 6dq dr 6dq ds 6dt du 6dt dv 6dw dx 6dy dz 6d{ d| 6d} d~ 6dn d 6d d 6d d 6d d 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6dd 6d d 6d d 6dd 6d d 6d d 6dQd 6d d 6d d 6d d 6d d 6d}d 6d d 6d d 6d d 6d d 6dd 6d d 6d d 6d d 6d d 6dg d 6d d 6dg d 6d d 6d d 6dd 6d d 6d" d 6d d 6d d 6d[d 6d[d 6d d 6d d 6d[d 6d d 6d d 6d d 6d d 6dbd 6d d 6d d 6dfd 6ddd 6dbd 6d d 6dfd 6ddd 6d d 6d d 6d d 6dhd 6d d 6d^d 6d d 6d d 6d d 6dld 6d d 6d d 6d d 6dod 6dod 6dhd 6d d 6d d 6d d 6d d 6d d 6d d 6d d 6dd6dd6dd6dd6dd6d d 6dud 6dudG6dd 6dd 6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6dd!6dd"6d#d$6dd%6d&d'6d(d)6d*d+6d~ d,6d d-6d.d/6d0d16d2d36d4d56d6d76d8d96dzd:6dd;6d<d=6d>d?6d@dA6dBdC6dDdE6dFdG6dHdI6dJdK6ddL6d>dM6dNdO6dPdQ6dRdS6ddT6ddU6dVdW6ddX6ddY6ddZ6dd[6d\d]6dd^6dd_6d`da6ddb6dcdd6d,de6ddf6dgdh6didj6dkdl6ddm6d2dn6d,do6ddp6ddq6dadr6dsdt6d4du6dvdw6dxdy6d dz6dd{6dad|6d}d~6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dsd6dd6dd6dd6d@d6dd6dd6dvd6dd6dd6dd6dd6dd6dd6dd6dd6d d6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d$ d6dd6dd6dt d6dzd6dzd6dd6dd6dd6dd6dvd6dvd6dd6dd6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6d;d6dd6d=d6d=d6dd6dd6dd6dd6dd6dd6dd6d)d6dvd6dd6dd6dd 6d d 6d d 6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d!d"6dd#6d$d%6dd&6dd'6dd(6dd)6dd*6dd+6dd,6dd-6dd.6dd/6dvd06dvd16dd26d3d46dvd56d d66dd76d8d96dd:6d d;6d d<6d d=6d>d?6d@dA6dBdC6d dD6dEdF6dGdH6dIdJ6dKdL6dMdN6dOdP6d>dQ6d dR6d@dS6dKdT6dIdU6dVdW6dXdY6dZd[6d d\6dd]6dd^6dd_6dd`6dda6ddb6ddc6ddd6dedf6dgdh6dgdi6djdk6djdl6dmdn6dmdo6ddp6dqdr6dsdt6dudv6ddw6dxdy6dzd{6d|d}6d~d6dd6dd6dd6dd6dd6dd6dqd6dd6dd6dd6dd6dd6dd6dv d6dxd6dxd6dd6dd6dd6dd6dd6dMd6dd6dd6dd6dEd6dd6dd6d3d6d3d6dd6dd6dd6dAd6d;d6d9d6dAd6d;d6dd6dd6dd6dd6dd6dd6dd6dd6d d6d{ d6d0d6dd6dd6dd6dd6dd6dd6d"d6dd6d: d6d d6dId6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dwd6dd6d{d6d d 6d d 6d d6d 
d6dOd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d!d"6dd#6dyd$6dOd%6dd&6dnd'6d(d)6dd*6d(d+6d,d-6d.d/6d.d06d1d26d3d46d5d66d7d86d9d:6d;d<6dd=6dd>6d,d?6d@dA6d@dB6dCdD6ddE6dFdG6dHdI6ddJ6dKdL6d dM6d dN6ds dO6d dP6d dQ6dodR6dydS6dkdT6ddU6dVdW6dXdY6dZd[6d\d]6dd^6dEd_6dd`6dadb6ddc6di dd6dedf6dgdh6didj6ddk6ddl6dmdn6dEdo6ddp6ddq6drds6dodt6ddu6dvdw6dXdx6dVdy6d\dz6dZd{6d|d}6d~d6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dld6dd6dd6d d6dod6dd6d d6dmd6d d6dd6dd6dd6dd6dd6dd6dqd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6dd6Z/i"dd6d d6d d6dd6d d6d d6dyd6dn d6dd6dd6did6d d6dd6d d6dd6dd6dd6dd6dd6d8d6dd6d6d6dd6d*d6do d6d d6dd6d"d6dd6dd6d@ d6dd6dd6dd6Z0idd6dd6d d 6d d 6d d6dd6dd6dd6Z1e e1d e1de1dgZ2egejD]\Z3Z4e4e3f^qDNZ5de5d' instead.u'expected-tag-name-but-got-right-bracketuSExpected tag name. Got '?' instead. (HTML doesn't support processing instructions.)u'expected-tag-name-but-got-question-marku-Expected tag name. Got something else insteaduexpected-tag-nameu6Expected closing tag. Got '>' instead. Ignoring ''.u*expected-closing-tag-but-got-right-bracketu-Expected closing tag. Unexpected end of file.u expected-closing-tag-but-got-eofu<Expected closing tag. Unexpected character '%(data)s' found.u!expected-closing-tag-but-got-charu'Unexpected end of file in the tag name.ueof-in-tag-nameu8Unexpected end of file. Expected attribute name instead.u#expected-attribute-name-but-got-eofu)Unexpected end of file in attribute name.ueof-in-attribute-nameu#Invalid character in attribute nameu#invalid-character-in-attribute-nameu#Dropped duplicate attribute on tag.uduplicate-attributeu1Unexpected end of file. Expected = or end of tag.u$expected-end-of-tag-name-but-got-eofu1Unexpected end of file. Expected attribute value.u$expected-attribute-value-but-got-eofu*Expected attribute value. 
Got '>' instead.u.expected-attribute-value-but-got-right-bracketu"Unexpected = in unquoted attributeu"equals-in-unquoted-attribute-valueu*Unexpected character in unquoted attributeu0unexpected-character-in-unquoted-attribute-valueu*Unexpected character after attribute name.u&invalid-character-after-attribute-nameu+Unexpected character after attribute value.u*unexpected-character-after-attribute-valueu.Unexpected end of file in attribute value (").u#eof-in-attribute-value-double-quoteu.Unexpected end of file in attribute value (').u#eof-in-attribute-value-single-quoteu*Unexpected end of file in attribute value.u eof-in-attribute-value-no-quotesu)Unexpected end of file in tag. Expected >u#unexpected-EOF-after-solidus-in-tagu/Unexpected character after / in tag. Expected >u)unexpected-character-after-solidus-in-tagu&Expected '--' or 'DOCTYPE'. Not found.uexpected-dashes-or-doctypeu Unexpected ! after -- in commentu,unexpected-bang-after-double-dash-in-commentu$Unexpected space after -- in commentu-unexpected-space-after-double-dash-in-commentuIncorrect comment.uincorrect-commentu"Unexpected end of file in comment.ueof-in-commentu%Unexpected end of file in comment (-)ueof-in-comment-end-dashu+Unexpected '-' after '--' found in comment.u,unexpected-dash-after-double-dash-in-commentu'Unexpected end of file in comment (--).ueof-in-comment-double-dashueof-in-comment-end-space-stateueof-in-comment-end-bang-stateu&Unexpected character in comment found.uunexpected-char-in-commentu(No space after literal string 'DOCTYPE'.uneed-space-after-doctypeu.Unexpected > character. Expected DOCTYPE name.u+expected-doctype-name-but-got-right-bracketu.Unexpected end of file. Expected DOCTYPE name.u!expected-doctype-name-but-got-eofu'Unexpected end of file in DOCTYPE name.ueof-in-doctype-nameu"Unexpected end of file in DOCTYPE.ueof-in-doctypeu%Expected space or '>'. 
Got '%(data)s'u*expected-space-or-right-bracket-in-doctypeuUnexpected end of DOCTYPE.uunexpected-end-of-doctypeu Unexpected character in DOCTYPE.uunexpected-char-in-doctypeuXXX innerHTML EOFueof-in-innerhtmluUnexpected DOCTYPE. Ignored.uunexpected-doctypeu%html needs to be the first start tag.u non-html-rootu)Unexpected End of file. Expected DOCTYPE.uexpected-doctype-but-got-eofuErroneous DOCTYPE.uunknown-doctypeu2Unexpected non-space characters. Expected DOCTYPE.uexpected-doctype-but-got-charsu2Unexpected start tag (%(name)s). Expected DOCTYPE.u"expected-doctype-but-got-start-tagu0Unexpected end tag (%(name)s). Expected DOCTYPE.u expected-doctype-but-got-end-tagu?Unexpected end tag (%(name)s) after the (implied) root element.uend-tag-after-implied-rootu4Unexpected end of file. Expected end tag (%(name)s).u&expected-named-closing-tag-but-got-eofu4Unexpected start tag head in existing head. Ignored.u!two-heads-are-not-better-than-oneu'Unexpected end tag (%(name)s). Ignored.uunexpected-end-tagu;Unexpected start tag (%(name)s) that can be in head. Moved.u#unexpected-start-tag-out-of-my-headu Unexpected start tag (%(name)s).uunexpected-start-taguMissing end tag (%(name)s).umissing-end-taguMissing end tags (%(name)s).umissing-end-tagsuCUnexpected start tag (%(startName)s) implies end tag (%(endName)s).u$unexpected-start-tag-implies-end-tagu@Unexpected start tag (%(originalName)s). Treated as %(newName)s.uunexpected-start-tag-treated-asu,Unexpected start tag %(name)s. Don't use it!udeprecated-tagu'Unexpected start tag %(name)s. Ignored.uunexpected-start-tag-ignoreduEUnexpected end tag (%(gotName)s). Missing end tag (%(expectedName)s).u$expected-one-end-tag-but-got-anotheru:End tag (%(name)s) seen too early. Expected other end tag.uend-tag-too-earlyuFUnexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).uend-tag-too-early-namedu+End tag (%(name)s) seen too early. 
Ignored.uend-tag-too-early-ignoreduQEnd tag (%(name)s) violates step 1, paragraph 1 of the adoption agency algorithm.uadoption-agency-1.1uQEnd tag (%(name)s) violates step 1, paragraph 2 of the adoption agency algorithm.uadoption-agency-1.2uQEnd tag (%(name)s) violates step 1, paragraph 3 of the adoption agency algorithm.uadoption-agency-1.3uQEnd tag (%(name)s) violates step 4, paragraph 4 of the adoption agency algorithm.uadoption-agency-4.4u>Unexpected end tag (%(originalName)s). Treated as %(newName)s.uunexpected-end-tag-treated-asu'This element (%(name)s) has no end tag.u no-end-tagu9Unexpected implied end tag (%(name)s) in the table phase.u#unexpected-implied-end-tag-in-tableu>Unexpected implied end tag (%(name)s) in the table body phase.u(unexpected-implied-end-tag-in-table-bodyuDUnexpected non-space characters in table context caused voodoo mode.u$unexpected-char-implies-table-voodoou3Unexpected input with type hidden in table context.u unexpected-hidden-input-in-tableu!Unexpected form in table context.uunexpected-form-in-tableuDUnexpected start tag (%(name)s) in table context caused voodoo mode.u)unexpected-start-tag-implies-table-voodoouBUnexpected end tag (%(name)s) in table context caused voodoo mode.u'unexpected-end-tag-implies-table-voodoouCUnexpected table cell start tag (%(name)s) in the table body phase.uunexpected-cell-in-table-bodyuFGot table cell end tag (%(name)s) while required end tags are missing.uunexpected-cell-end-tagu?Unexpected end tag (%(name)s) in the table body phase. Ignored.u unexpected-end-tag-in-table-bodyu=Unexpected implied end tag (%(name)s) in the table row phase.u'unexpected-implied-end-tag-in-table-rowu>Unexpected end tag (%(name)s) in the table row phase. 
Ignored.uunexpected-end-tag-in-table-rowuJUnexpected select start tag in the select phase treated as select end tag.uunexpected-select-in-selectu/Unexpected input start tag in the select phase.uunexpected-input-in-selectuBUnexpected start tag token (%(name)s in the select phase. Ignored.uunexpected-start-tag-in-selectu;Unexpected end tag (%(name)s) in the select phase. Ignored.uunexpected-end-tag-in-selectuKUnexpected table element start tag (%(name)s) in the select in table phase.u5unexpected-table-element-start-tag-in-select-in-tableuIUnexpected table element end tag (%(name)s) in the select in table phase.u3unexpected-table-element-end-tag-in-select-in-tableu8Unexpected non-space characters in the after body phase.uunexpected-char-after-bodyu>Unexpected start tag token (%(name)s) in the after body phase.uunexpected-start-tag-after-bodyu<Unexpected end tag token (%(name)s) in the after body phase.uunexpected-end-tag-after-bodyu@Unexpected characters in the frameset phase. Characters ignored.uunexpected-char-in-framesetuEUnexpected start tag token (%(name)s) in the frameset phase. Ignored.u unexpected-start-tag-in-framesetuFUnexpected end tag token (frameset) in the frameset phase (innerHTML).u)unexpected-frameset-in-frameset-innerhtmluCUnexpected end tag token (%(name)s) in the frameset phase. Ignored.uunexpected-end-tag-in-framesetuEUnexpected non-space characters in the after frameset phase. Ignored.uunexpected-char-after-framesetuEUnexpected start tag (%(name)s) in the after frameset phase. Ignored.u#unexpected-start-tag-after-framesetuCUnexpected end tag (%(name)s) in the after frameset phase. Ignored.u!unexpected-end-tag-after-framesetu(Unexpected end tag after body(innerHtml)u'unexpected-end-tag-after-body-innerhtmlu6Unexpected non-space characters. Expected end of file.uexpected-eof-but-got-charu6Unexpected start tag (%(name)s). Expected end of file.uexpected-eof-but-got-start-tagu4Unexpected end tag (%(name)s). 
Expected end of file.uexpected-eof-but-got-end-tagu/Unexpected end of file. Expected table content.u eof-in-tableu0Unexpected end of file. Expected select content.u eof-in-selectu2Unexpected end of file. Expected frameset content.ueof-in-framesetu0Unexpected end of file. Expected script content.ueof-in-script-in-scriptu0Unexpected end of file. Expected foreign contentueof-in-foreign-landsu0Trailing solidus not allowed on element %(name)su&non-void-element-with-trailing-solidusu2Element %(name)s not allowed in a non-html contextu*unexpected-html-element-in-foreign-contentu*Unexpected end tag (%(name)s) before html.uunexpected-end-tag-before-htmlu9Element %(name)s not allowed in a inhead-noscript contextuunexpected-inhead-noscript-tagu8Unexpected end of file. Expected inhead-noscript contentueof-in-head-noscriptu@Unexpected non-space character. Expected inhead-noscript contentuchar-in-head-noscriptu0Undefined error (this sucks and should be fixed)uXXX-undefined-erroruhttp://www.w3.org/1999/xhtmluhtmlu"http://www.w3.org/1998/Math/MathMLumathmluhttp://www.w3.org/2000/svgusvguhttp://www.w3.org/1999/xlinkuxlinku$http://www.w3.org/XML/1998/namespaceuxmluhttp://www.w3.org/2000/xmlns/uxmlnsuappletucaptionumarqueeuobjectutableutduthumiumoumnumsumtextuannotation-xmlu foreignObjectudescutitleuaububigucodeuemufontuiunobrususmallustrikeustronguttuuuaddressuareauarticleuasideubaseubasefontubgsoundu blockquoteubodyubrubuttonucenterucolucolgroupucommanduddudetailsudirudivudludtuembedufieldsetufigureufooteruformuframeuframesetuh1uh2uh3uh4uh5uh6uheaduheaderuhruiframeuimageuimguinputuisindexuliulinkulistingumenuumetaunavunoembedunoframesunoscriptuolupuparamu plaintextupreuscriptusectionuselectustyleutbodyutextareautfootutheadutruuluwbruxmpu annotaion-xmlu attributeNameu attributenameu attributeTypeu attributetypeu baseFrequencyu basefrequencyu baseProfileu baseprofileucalcModeucalcmodeu clipPathUnitsu 
clippathunitsucontentScriptTypeucontentscripttypeucontentStyleTypeucontentstyletypeudiffuseConstantudiffuseconstantuedgeModeuedgemodeuexternalResourcesRequireduexternalresourcesrequiredu filterResu filterresu filterUnitsu filterunitsuglyphRefuglyphrefugradientTransformugradienttransformu gradientUnitsu gradientunitsu kernelMatrixu kernelmatrixukernelUnitLengthukernelunitlengthu keyPointsu keypointsu keySplinesu keysplinesukeyTimesukeytimesu lengthAdjustu lengthadjustulimitingConeAngleulimitingconeangleu markerHeightu markerheightu markerUnitsu markerunitsu markerWidthu markerwidthumaskContentUnitsumaskcontentunitsu maskUnitsu maskunitsu numOctavesu numoctavesu pathLengthu pathlengthupatternContentUnitsupatterncontentunitsupatternTransformupatterntransformu patternUnitsu patternunitsu pointsAtXu pointsatxu pointsAtYu pointsatyu pointsAtZu pointsatzu preserveAlphau preservealphaupreserveAspectRatioupreserveaspectratiouprimitiveUnitsuprimitiveunitsurefXurefxurefYurefyu repeatCountu repeatcountu repeatDuru repeatdururequiredExtensionsurequiredextensionsurequiredFeaturesurequiredfeaturesuspecularConstantuspecularconstantuspecularExponentuspecularexponentu spreadMethodu spreadmethodu startOffsetu startoffsetu stdDeviationu stddeviationu stitchTilesu stitchtilesu surfaceScaleu surfacescaleusystemLanguageusystemlanguageu tableValuesu tablevaluesutargetXutargetxutargetYutargetyu textLengthu textlengthuviewBoxuviewboxu viewTargetu viewtargetuxChannelSelectoruxchannelselectoruyChannelSelectoruychannelselectoru zoomAndPanu zoomandpanu definitionURLu definitionurluactuateu xlink:actuateuarcroleu xlink:arcroleuhrefu xlink:hrefuroleu xlink:roleushowu xlink:showu xlink:titleutypeu xlink:typeuxml:baseulanguxml:languspaceu xml:spaceu xmlns:xlinku u u u u u event-sourceusourceutracku irrelevantuuscopeduismapuautoplayucontrolsuaudiouvideoudeferuasyncuopenumultipleudisabledudatagriduhiddenucheckedudefaultunoshadeu autosubmitureadonlyuselecteduoptionuoptgroupu autofocusurequireduoutputi 
ii ii i& i i! ii0 i`i9 iRi}i i i i i" i i ii"!iai: iSi~ixult;ugt;uamp;uapos;uquot;uÆuAEliguAElig;u&uAMPuAMP;uÁuAacuteuAacute;uĂuAbreve;uÂuAcircuAcirc;uАuAcy;u𝔄uAfr;uÀuAgraveuAgrave;uΑuAlpha;uĀuAmacr;u⩓uAnd;uĄuAogon;u𝔸uAopf;u⁡uApplyFunction;uÅuAringuAring;u𝒜uAscr;u≔uAssign;uÃuAtildeuAtilde;uÄuAumluAuml;u∖u Backslash;u⫧uBarv;u⌆uBarwed;uБuBcy;u∵uBecause;uℬu Bernoullis;uΒuBeta;u𝔅uBfr;u𝔹uBopf;u˘uBreve;uBscr;u≎uBumpeq;uЧuCHcy;u©uCOPYuCOPY;uĆuCacute;u⋒uCap;uⅅuCapitalDifferentialD;uℭuCayleys;uČuCcaron;uÇuCcediluCcedil;uĈuCcirc;u∰uCconint;uĊuCdot;u¸uCedilla;u·u CenterDot;uCfr;uΧuChi;u⊙u CircleDot;u⊖u CircleMinus;u⊕u CirclePlus;u⊗u CircleTimes;u∲uClockwiseContourIntegral;u”uCloseCurlyDoubleQuote;u’uCloseCurlyQuote;u∷uColon;u⩴uColone;u≡u Congruent;u∯uConint;u∮uContourIntegral;uℂuCopf;u∐u Coproduct;u∳u CounterClockwiseContourIntegral;u⨯uCross;u𝒞uCscr;u⋓uCup;u≍uCupCap;uDD;u⤑u DDotrahd;uЂuDJcy;uЅuDScy;uЏuDZcy;u‡uDagger;u↡uDarr;u⫤uDashv;uĎuDcaron;uДuDcy;u∇uDel;uΔuDelta;u𝔇uDfr;u´uDiacriticalAcute;u˙uDiacriticalDot;u˝uDiacriticalDoubleAcute;u`uDiacriticalGrave;u˜uDiacriticalTilde;u⋄uDiamond;uⅆuDifferentialD;u𝔻uDopf;u¨uDot;u⃜uDotDot;u≐u DotEqual;uDoubleContourIntegral;u DoubleDot;u⇓uDoubleDownArrow;u⇐uDoubleLeftArrow;u⇔uDoubleLeftRightArrow;uDoubleLeftTee;u⟸uDoubleLongLeftArrow;u⟺uDoubleLongLeftRightArrow;u⟹uDoubleLongRightArrow;u⇒uDoubleRightArrow;u⊨uDoubleRightTee;u⇑uDoubleUpArrow;u⇕uDoubleUpDownArrow;u∥uDoubleVerticalBar;u↓u DownArrow;u⤓u DownArrowBar;u⇵uDownArrowUpArrow;ȗu DownBreve;u⥐uDownLeftRightVector;u⥞uDownLeftTeeVector;u↽uDownLeftVector;u⥖uDownLeftVectorBar;u⥟uDownRightTeeVector;u⇁uDownRightVector;u⥗uDownRightVectorBar;u⊤uDownTee;u↧u DownTeeArrow;u Downarrow;u𝒟uDscr;uĐuDstrok;uŊuENG;uÐuETHuETH;uÉuEacuteuEacute;uĚuEcaron;uÊuEcircuEcirc;uЭuEcy;uĖuEdot;u𝔈uEfr;uÈuEgraveuEgrave;u∈uElement;uĒuEmacr;u◻uEmptySmallSquare;u▫uEmptyVerySmallSquare;uĘuEogon;u𝔼uEopf;uΕuEpsilon;u⩵uEqual;u≂u EqualTilde;u⇌u Equilibrium;uℰuEscr;u⩳uEsim;uΗuEta;uËuEumluEuml;u∃uExists;uⅇu 
ExponentialE;uФuFcy;u𝔉uFfr;u◼uFilledSmallSquare;u▪uFilledVerySmallSquare;u𝔽uFopf;u∀uForAll;uℱu Fouriertrf;uFscr;uЃuGJcy;u>uGTuGT;uΓuGamma;uϜuGammad;uĞuGbreve;uĢuGcedil;uĜuGcirc;uГuGcy;uĠuGdot;u𝔊uGfr;u⋙uGg;u𝔾uGopf;u≥u GreaterEqual;u⋛uGreaterEqualLess;u≧uGreaterFullEqual;u⪢uGreaterGreater;u≷u GreaterLess;u⩾uGreaterSlantEqual;u≳u GreaterTilde;u𝒢uGscr;u≫uGt;uЪuHARDcy;uˇuHacek;u^uHat;uĤuHcirc;uℌuHfr;uℋu HilbertSpace;uℍuHopf;u─uHorizontalLine;uHscr;uĦuHstrok;u HumpDownHump;u≏u HumpEqual;uЕuIEcy;uIJuIJlig;uЁuIOcy;uÍuIacuteuIacute;uÎuIcircuIcirc;uИuIcy;uİuIdot;uℑuIfr;uÌuIgraveuIgrave;uIm;uĪuImacr;uⅈu ImaginaryI;uImplies;u∬uInt;u∫u Integral;u⋂u Intersection;u⁣uInvisibleComma;u⁢uInvisibleTimes;uĮuIogon;u𝕀uIopf;uΙuIota;uℐuIscr;uĨuItilde;uІuIukcy;uÏuIumluIuml;uĴuJcirc;uЙuJcy;u𝔍uJfr;u𝕁uJopf;u𝒥uJscr;uЈuJsercy;uЄuJukcy;uХuKHcy;uЌuKJcy;uΚuKappa;uĶuKcedil;uКuKcy;u𝔎uKfr;u𝕂uKopf;u𝒦uKscr;uЉuLJcy;u⃒unvgt;u⧞unvinfin;u⤂unvlArr;u≤⃒unvle;u<⃒unvlt;u⊴⃒unvltrie;u⤃unvrArr;u⊵⃒unvrtrie;u∼⃒unvsim;u⇖unwArr;u⤣unwarhk;unwarr;unwarrow;u⤧unwnear;uoS;uóuoacuteuoacute;uoast;uocir;uôuocircuocirc;uоuocy;uodash;uőuodblac;u⨸uodiv;uodot;u⦼uodsold;uœuoelig;u⦿uofcir;u𝔬uofr;u˛uogon;uòuograveuograve;u⧁uogt;u⦵uohbar;uohm;uoint;uolarr;u⦾uolcir;u⦻uolcross;uoline;u⧀uolt;uōuomacr;uωuomega;uοuomicron;u⦶uomid;uominus;u𝕠uoopf;u⦷uopar;u⦹uoperp;uoplus;u∨uor;uorarr;u⩝uord;uℴuorder;uorderof;uªuordfuordf;uºuordmuordm;u⊶uorigof;u⩖uoror;u⩗uorslope;u⩛uorv;uoscr;uøuoslashuoslash;u⊘uosol;uõuotildeuotilde;uotimes;u⨶u otimesas;uöuoumluouml;u⌽uovbar;upar;u¶uparaupara;u parallel;u⫳uparsim;u⫽uparsl;upart;uпupcy;u%upercnt;u.uperiod;u‰upermil;uperp;u‱upertenk;u𝔭upfr;uφuphi;uϕuphiv;uphmmat;u☎uphone;uπupi;u pitchfork;uϖupiv;uplanck;uℎuplanckh;uplankv;u+uplus;u⨣u plusacir;uplusb;u⨢upluscir;uplusdo;u⨥uplusdu;u⩲upluse;uplusmnuplusmn;u⨦uplussim;u⨧uplustwo;upm;u⨕u pointint;u𝕡upopf;u£upoundupound;upr;u⪳uprE;u⪷uprap;uprcue;upre;uprec;u precapprox;u preccurlyeq;upreceq;u⪹u precnapprox;u⪵u precneqq;u⋨u 
precnsim;uprecsim;u′uprime;uprimes;uprnE;uprnap;uprnsim;uprod;u⌮u profalar;u⌒u profline;u⌓u profsurf;uprop;upropto;uprsim;u⊰uprurel;u𝓅upscr;uψupsi;u upuncsp;u𝔮uqfr;uqint;u𝕢uqopf;u⁗uqprime;u𝓆uqscr;u quaternions;u⨖uquatint;u?uquest;uquesteq;uquoturAarr;urArr;u⤜urAtail;urBarr;u⥤urHar;u∽̱urace;uŕuracute;uradic;u⦳u raemptyv;urang;u⦒urangd;u⦥urange;urangle;u»uraquouraquo;urarr;u⥵urarrap;urarrb;u⤠urarrbfs;u⤳urarrc;u⤞urarrfs;urarrhk;urarrlp;u⥅urarrpl;u⥴urarrsim;u↣urarrtl;u↝urarrw;u⤚uratail;u∶uratio;u rationals;urbarr;u❳urbbrk;u}urbrace;u]urbrack;u⦌urbrke;u⦎urbrksld;u⦐urbrkslu;uřurcaron;uŗurcedil;urceil;urcub;uрurcy;u⤷urdca;u⥩urdldhar;urdquo;urdquor;u↳urdsh;ureal;urealine;u realpart;ureals;u▭urect;uregureg;u⥽urfisht;urfloor;u𝔯urfr;urhard;urharu;u⥬urharul;uρurho;uϱurhov;u rightarrow;urightarrowtail;urightharpoondown;urightharpoonup;urightleftarrows;urightleftharpoons;u⇉urightrightarrows;urightsquigarrow;u⋌urightthreetimes;u˚uring;u risingdotseq;urlarr;urlhar;u‏urlm;u⎱urmoust;u rmoustache;u⫮urnmid;u⟭uroang;u⇾uroarr;urobrk;u⦆uropar;u𝕣uropf;u⨮uroplus;u⨵urotimes;u)urpar;u⦔urpargt;u⨒u rppolint;urrarr;u›ursaquo;u𝓇urscr;ursh;ursqb;ursquo;ursquor;urthree;u⋊urtimes;u▹urtri;urtrie;urtrif;u⧎u rtriltri;u⥨uruluhar;u℞urx;uśusacute;usbquo;usc;u⪴uscE;u⪸uscap;ušuscaron;usccue;usce;uşuscedil;uŝuscirc;u⪶uscnE;u⪺uscnap;u⋩uscnsim;u⨓u scpolint;uscsim;uсuscy;u⋅usdot;usdotb;u⩦usdote;u⇘useArr;usearhk;usearr;usearrow;u§usectusect;u;usemi;u⤩useswar;u setminus;usetmn;u✶usext;u𝔰usfr;usfrown;u♯usharp;uщushchcy;uшushcy;u shortmid;ushortparallel;u­ushyushy;uσusigma;uςusigmaf;usigmav;usim;u⩪usimdot;usime;usimeq;u⪞usimg;u⪠usimgE;u⪝usiml;u⪟usimlE;u≆usimne;u⨤usimplus;u⥲usimrarr;uslarr;usmallsetminus;u⨳usmashp;u⧤u smeparsl;usmid;u⌣usmile;u⪪usmt;u⪬usmte;u⪬︀usmtes;uьusoftcy;u/usol;u⧄usolb;u⌿usolbar;u𝕤usopf;u♠uspades;u spadesuit;uspar;usqcap;u⊓︀usqcaps;usqcup;u⊔︀usqcups;usqsub;usqsube;u sqsubset;u sqsubseteq;usqsup;usqsupe;u sqsupset;u 
sqsupseteq;usqu;usquare;usquarf;usquf;usrarr;u𝓈usscr;ussetmn;ussmile;usstarf;u☆ustar;ustarf;ustraightepsilon;u straightphi;ustrns;u⊂usub;u⫅usubE;u⪽usubdot;usube;u⫃usubedot;u⫁usubmult;u⫋usubnE;u⊊usubne;u⪿usubplus;u⥹usubrarr;usubset;u subseteq;u subseteqq;u subsetneq;u subsetneqq;u⫇usubsim;u⫕usubsub;u⫓usubsup;usucc;u succapprox;u succcurlyeq;usucceq;u succnapprox;u succneqq;u succnsim;usuccsim;usum;u♪usung;u¹usup1usup1;u²usup2usup2;u³usup3usup3;usup;u⫆usupE;u⪾usupdot;u⫘usupdsub;usupe;u⫄usupedot;u⟉usuphsol;u⫗usuphsub;u⥻usuplarr;u⫂usupmult;u⫌usupnE;u⊋usupne;u⫀usupplus;usupset;u supseteq;u supseteqq;u supsetneq;u supsetneqq;u⫈usupsim;u⫔usupsub;u⫖usupsup;u⇙uswArr;uswarhk;uswarr;uswarrow;u⤪uswnwar;ußuszliguszlig;u⌖utarget;uτutau;utbrk;uťutcaron;uţutcedil;uтutcy;utdot;u⌕utelrec;u𝔱utfr;uthere4;u therefore;uθutheta;uϑu thetasym;uthetav;u thickapprox;u thicksim;uthinsp;uthkap;uthksim;uþuthornuthorn;utilde;u×utimesutimes;utimesb;u⨱u timesbar;u⨰utimesd;utint;utoea;utop;u⌶utopbot;u⫱utopcir;u𝕥utopf;u⫚utopfork;utosa;u‴utprime;utrade;u▵u triangle;u triangledown;u triangleleft;utrianglelefteq;u≜u triangleq;utriangleright;utrianglerighteq;u◬utridot;utrie;u⨺u triminus;u⨹utriplus;u⧍utrisb;u⨻utritime;u⏢u trpezium;u𝓉utscr;uцutscy;uћutshcy;uŧutstrok;utwixt;utwoheadleftarrow;utwoheadrightarrow;uuArr;u⥣uuHar;uúuuacuteuuacute;uuarr;uўuubrcy;uŭuubreve;uûuucircuucirc;uуuucy;uudarr;uűuudblac;uudhar;u⥾uufisht;u𝔲uufr;uùuugraveuugrave;uuharl;uuharr;u▀uuhblk;u⌜uulcorn;u ulcorner;u⌏uulcrop;u◸uultri;uūuumacr;uumluuml;uųuuogon;u𝕦uuopf;uuparrow;u updownarrow;uupharpoonleft;uupharpoonright;uuplus;uυuupsi;uupsih;uupsilon;u⇈u upuparrows;u⌝uurcorn;u urcorner;u⌎uurcrop;uůuuring;u◹uurtri;u𝓊uuscr;u⋰uutdot;uũuutilde;uutri;uutrif;uuuarr;uüuuumluuuml;u⦧uuwangle;uvArr;u⫨uvBar;u⫩uvBarv;uvDash;u⦜uvangrt;u varepsilon;u varkappa;u varnothing;uvarphi;uvarpi;u varpropto;uvarr;uvarrho;u varsigma;u⊊︀u varsubsetneq;u⫋︀uvarsubsetneqq;u⊋︀u varsupsetneq;u⫌︀uvarsupsetneqq;u 
vartheta;uvartriangleleft;uvartriangleright;uвuvcy;uvdash;uvee;u⊻uveebar;u≚uveeeq;u⋮uvellip;uverbar;uvert;u𝔳uvfr;uvltri;uvnsub;uvnsup;u𝕧uvopf;uvprop;uvrtri;u𝓋uvscr;uvsubnE;uvsubne;uvsupnE;uvsupne;u⦚uvzigzag;uŵuwcirc;u⩟uwedbar;uwedge;u≙uwedgeq;u℘uweierp;u𝔴uwfr;u𝕨uwopf;uwp;uwr;uwreath;u𝓌uwscr;uxcap;uxcirc;uxcup;uxdtri;u𝔵uxfr;uxhArr;uxharr;uξuxi;uxlArr;uxlarr;uxmap;u⋻uxnis;uxodot;u𝕩uxopf;uxoplus;uxotime;uxrArr;uxrarr;u𝓍uxscr;uxsqcup;uxuplus;uxutri;uxvee;uxwedge;uýuyacuteuyacute;uяuyacy;uŷuycirc;uыuycy;u¥uyenuyen;u𝔶uyfr;uїuyicy;u𝕪uyopf;u𝓎uyscr;uюuyucy;uÿuyumluyuml;uźuzacute;užuzcaron;uзuzcy;użuzdot;uzeetrf;uζuzeta;u𝔷uzfr;uжuzhcy;u⇝uzigrarr;u𝕫uzopf;u𝓏uzscr;u‍uzwj;u‌uzwnj;u�i iuiiiiiiiiiiiiuiiuiuiiiiiiiiiiiiiuiiiuDoctypeiu CharactersiuSpaceCharactersiuStartTagiuEndTagiuEmptyTagiuCommentiu ParseErrorumathtDataLossWarningcB`seZRS((t__name__t __module__(((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.pyR| stReparseExceptioncB`seZRS((RR(((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.pyR s(uh1uh2uh3uh4uh5uh6( i ii ii i& i i! 
ii0 i`i9 iRii}iii i i i i" i i ii"!iai: iSii~ix(:t __future__RRRtstringtNonetEOFtEt namespacest frozensettscopingElementstformattingElementstspecialElementsthtmlIntegrationPointElementst"mathmlTextIntegrationPointElementstadjustSVGAttributestadjustMathMLAttributestadjustForeignAttributestdicttitemstqnametprefixtlocaltnstunadjustForeignAttributestspaceCharactersttableInsertModeElementstascii_lowercasetasciiLowercasetascii_uppercasetasciiUppercaset ascii_letterst asciiLetterstdigitst hexdigitst hexDigitstctordtlowertasciiUpper2LowertheadingElementst voidElementst cdataElementstrcdataElementstbooleanAttributestentitiesWindows1252t xmlEntitiestentitiestreplacementCharacterst tokenTypest tagTokenTypestktvtprefixest UserWarningRt ExceptionR(((sB/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.pytsL                                                                                                                      :  4     1 PK.e[|:html5lib/html5parser.pycnu[ abc@`sddlmZmZmZddlmZmZmZddlZyddl m Z Wn!e k r{ddl m Z nXddl mZddl mZddl mZdd lmZdd l mZdd lmZmZmZmZmZmZmZmZmZmZmZm Z!m"Z"m#Z#m$Z$m%Z%d e&d Z'dd e&dZ(dZ)de*fdYZ+ej,dZ-dZ.de/e0dZ1de2fdYZ3dS(i(tabsolute_importtdivisiontunicode_literals(twith_metaclasstviewkeystPY3N(t OrderedDicti(t _inputstream(t _tokenizer(t treebuilders(tMarker(t_utils(tspaceCharacterstasciiUpper2LowertspecialElementstheadingElementst cdataElementstrcdataElementst tokenTypest tagTokenTypest namespacesthtmlIntegrationPointElementst"mathmlTextIntegrationPointElementstadjustForeignAttributestadjustMathMLAttributestadjustSVGAttributestEtReparseExceptionuetreecK`s1tj|}t|d|}|j||S(u.Parse a string or file-like object into a treetnamespaceHTMLElements(R tgetTreeBuildert HTMLParsertparse(tdoct treebuilderRtkwargsttbtp((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRsudivcK`s7tj|}t|d|}|j|d||S(NRt container(R RRt parseFragment(R R%R!RR"R#R$((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR&&sc`s 
dtffdY}|S(Nt Decoratedc`seZfdZRS(c`s^xE|jD]7\}}t|tjr:|}n|||tphasetinsertHtmlElementtresetInsertionModeR9t lastPhasetbeforeRCDataPhasetTruet framesetOK(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRI^s*           cC`s't|dsdS|jjjdjS(uThe name of the character encoding that was used to decode the input stream, or :obj:`None` if that is not determined yet. u tokenizeriN(thasattrR9RHRKt charEncodingRA(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytdocumentEncodingscC`se|jdkrK|jtdkrKd|jkoJ|jdjtdkS|j|jftkSdS(Nuannotation-xmlumathmluencodingu text/htmluapplication/xhtml+xml(u text/htmluapplication/xhtml+xml(RAt namespaceRt attributest translateR R(R?telement((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytisHTMLIntegrationPoints cC`s|j|jftkS(N(RaRAR(R?Rd((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytisMathMLTextIntegrationPointscC`sBtd}td}td}td}td}td}td}x|jD]}d} |} x=| dk r| } |jjr|jjdnd} | r| jnd} | r| jnd} | d }||kr|j| d | jd id} qht |jjd ks| |jj ks|j | rx||krf|d t ddgks|||fks| t dkr| dkr||kr|d dks|j| r||||fkr|j}n |jd}||kr |j| } qh||kr)|j| } qh||krG|j| } qh||kre|j| } qh||kr|j| } qh||krh|j| } qhqhW||krS| drS| d rS|jdi| d d 6qSqSWt}g}xG|r=|j|j|jj}|r|j|ks:tqqWdS(Nu CharactersuSpaceCharactersuStartTaguEndTaguCommentuDoctypeu ParseErroriutypeudataudatavarsiunameumglyphu malignmarkumathmluannotation-xmlusvguinForeignContentu selfClosinguselfClosingAcknowledgedu&non-void-element-with-trailing-solidus(RtnormalizedTokensR9R:t openElementsRaRAt parseErrortgettlentdefaultNamespaceRft frozensetRReRWR>tprocessCharacterstprocessSpaceCharacterstprocessStartTagt processEndTagtprocessCommenttprocessDoctypeR\tappendt processEOFtAssertionError(R?tCharactersTokentSpaceCharactersTokent StartTagTokent EndTagTokent CommentTokent DoctypeTokentParseErrorTokenttokent prev_tokent new_tokent currentNodetcurrentNodeNamespacetcurrentNodeNameR,RWt 
reprocessR>((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRJsp       "                  cc`s&x|jD]}|j|Vq WdS(N(RHtnormalizeToken(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRgscO`s&|j|td|||jjS(uParse a HTML document into a well-formed tree stream - a filelike object or string containing the HTML to be parsed The optional encoding parameter must be a string that indicates the encoding. If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) scripting - treat noscript elements as if javascript was turned on N(RMRNR9R:t getDocument(R?RKtargsR"((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cO`s#|j|t|||jjS(u2Parse a HTML fragment into a well-formed tree fragment container - name of the element we're setting the innerHTML property if set to None, default to 'div' stream - a filelike object or string containing the HTML to be parsed The optional encoding parameter must be a string that indicates the encoding. 
If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) scripting - treat noscript elements as if javascript was turned on (RMR\R:t getFragment(R?RKRR"((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR&suXXX-undefined-errorcC`s^|dkri}n|jj|jjj||f|jrZtt||ndS(N( R9R;RtRHRKtpositionR8t ParseErrorR(R?t errorcodetdatavars((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRis   % cC`sr|dtdkrn|d}t||dRW(R?tlasttnewModestnodetnodeNamet new_phase((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRY!sD       cC`su|dkst|jj||dkrC|jj|j_n|jj|j_|j|_|j d|_dS(uYGeneric RCDATA/RAWTEXT Parsing algorithm contentType - RCDATA or RAWTEXT uRAWTEXTuRCDATAutextN(uRAWTEXTuRCDATA( RvR:t insertElementRHRURTRSRWt originalPhaseR>(R?R~t contentType((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytparseRCDataRawtextMs  N(R5R6t__doc__R9RNR\RCRMRItpropertyR`ReRfRJRgRR&RiRRRRRRYR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR8s& "  C        ,c`sTd}d}dt|||fdYdfdY}dfdY}d ffd Y}d ffd Y}d ffdY}dffdY}dffdY} dffdY} dffdY} dffdY} dffdY} dffdY}dffdY}dffd Y}d!ffd"Y}d#ffd$Y}d%ffd&Y}d'ffd(Y}d)ffd*Y}d+ffd,Y}d-ffd.Y}d/ffd0Y}d1ffd2Y}i|d36|d46|d56|d66|d76|d86| d96| d:6| d;6| d<6| d=6|d>6|d?6|d@6|dA6|dB6|dC6|dD6|dE6|dF6|dG6|dH6|dI6S(JNc`s2tdtjDfd}|S(u4Logger that records which phase processes each tokencs`s!|]\}}||fVqdS(N((t.0tkeytvalue((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pys csc`sjjdrt|dkr|d}yi|dd6}Wn nX|dtkru|d|dRW(R?R~RAtpublicIdtsystemIdtcorrect((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRss              cS`s&d|j_|jjd|j_dS(Nuquirksu beforeHtml(RDRQR>RW(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyt anythingElses cS`s|jjd|j|S(Nuexpected-doctype-but-got-chars(RDRiR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRns 
cS`s,|jjdi|dd6|j|S(Nu"expected-doctype-but-got-start-taguname(RDRiR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRps  cS`s,|jjdi|dd6|j|S(Nu expected-doctype-but-got-end-taguname(RDRiR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRqs  cS`s|jjd|jtS(Nuexpected-doctype-but-got-eof(RDRiRR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu%s ( R5R6RoRrRsRRnRpRqRu(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs   _    tBeforeHtmlPhasecB`sGeZdZdZdZdZdZdZdZRS(cS`s3|jjtdd|jjd|j_dS(NuhtmluStartTagu beforeHead(R:t insertRoottimpliedTagTokenRDR>RW(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRX,scS`s|jtS(N(RXR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu1s cS`s|jj||jjdS(N(R:RR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRr5scS`sdS(N((R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRo8scS`s|j|S(N(RX(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn;s cS`s-|ddkrt|j_n|j|S(Nunameuhtml(R\RDRORX(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRp?s cS`sC|ddkr1|jjdi|dd6n|j|SdS(Nunameuheadubodyuhtmlubruunexpected-end-tag-before-html(uheadubodyuhtmlubr(RDRiRX(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRqEs   ( R5R6RXRuRrRoRnRpRq(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR*s      tBeforeHeadPhasec`s_eZfdZdZdZdZdZdZdZdZ dZ RS( c`s}j|||tjd|jfd|jfg|_|j|j_tjd|jfg|_ |j |j _dS(Nuhtmluheadubodyubr(uheadubodyuhtmlubr( RCR tMethodDispatcherRt startTagHeadRt startTagOthertdefaulttendTagImplyHeadRt endTagOther(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCNs 
cS`s|jtddtS(NuheaduStartTag(RRR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu\scS`sdS(N((R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRo`scS`s|jtdd|S(NuheaduStartTag(RR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRncscS`s|jjdj|S(NuinBody(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRgscS`s@|jj||jjd|j_|jjd|j_dS(NiuinHead(R:RRht headPointerRDR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRjscS`s|jtdd|S(NuheaduStartTag(RR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRoscS`s|jtdd|S(NuheaduStartTag(RR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRsscS`s"|jjdi|dd6dS(Nuend-tag-after-implied-rootuname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRws ( R5R6RCRuRoRnRRRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRMs       t InHeadPhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d Zd ZdZdZRS(c `sj|||tjd|jfd|jfd|jfd|jfd|jfd|jfd |j fd |j fg|_ |j |j _ tjd |jfd|jfg|_|j|j_ dS(Nuhtmlutitleunoframesustyleunoscriptuscriptubaseubasefontubgsounducommandulinkumetauheadubrubody(unoframesustyle(ubaseubasefontubgsounducommandulink(ubruhtmlubody(RCR RRt startTagTitletstartTagNoFramesStyletstartTagNoscripttstartTagScripttstartTagBaseLinkCommandt startTagMetaRRRRt endTagHeadtendTagHtmlBodyBrRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC|s         cS`s|jtS(N(RR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRus cS`s|j|S(N(R(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRns 
cS`s|jjdj|S(NuinBody(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|jjddS(Nu!two-heads-are-not-better-than-one(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s.|jj||jjjt|dRW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`sT|jj||jjj|jj_|jj|j_|jjd|j_dS(Nutext( R:RRDRHtscriptDataStateRTRWRR>(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|j|S(N(R(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`sQ|jjjj}|jdks7td|j|jjd|j_dS(NuheaduExpected head got %su afterHead(RDR:RhRRARvR>RW(R?R~R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs"cS`s|j|S(N(R(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`s"|jjdi|dd6dS(Nuunexpected-end-taguname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|jtddS(Nuhead(RR(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs(R5R6RCRuRnRRRRRRRRRRRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR{s               tInHeadNoscriptPhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d ZRS( c`sj|||tjd|jfd |jfd |jfg|_|j|j_tjd |j fd |j fg|_ |j |j _dS( Nuhtmlubasefontubgsoundulinkumetaunoframesustyleuheadunoscriptubr(ubasefontubgsoundulinkumetaunoframesustyle(uheadunoscript( RCR RRRtstartTagHeadNoscriptRRRtendTagNoscripttendTagBrRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCs   cS`s|jjd|jtS(Nueof-in-head-noscript(RDRiRR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRus cS`s|jjdj|S(NuinHead(RDR>Rr(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRrscS`s|jjd|j|S(Nuchar-in-head-noscript(RDRiR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRns 
cS`s|jjdj|S(NuinHead(RDR>Ro(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRoscS`s|jjdj|S(NuinBody(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|jjdj|S(NuinHead(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s"|jjdi|dd6dS(Nuunexpected-start-taguname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR scS`s,|jjdi|dd6|j|S(Nuunexpected-inhead-noscript-taguname(RDRiR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s cS`sQ|jjjj}|jdks7td|j|jjd|j_dS(NunoscriptuExpected noscript got %suinHead(RDR:RhRRARvR>RW(R?R~R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs"cS`s,|jjdi|dd6|j|S(Nuunexpected-inhead-noscript-taguname(RDRiR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`s"|jjdi|dd6dS(Nuunexpected-end-taguname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|jtddS(Nunoscript(RR(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs(R5R6RCRuRrRnRoRRRRRRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs           tAfterHeadPhasec`szeZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z RS( c `sj|||tjd|jfd|jfd|jfd|jfd |jfg|_|j |j_ tjd|j fg|_ |j |j _ dS(Nuhtmlubodyuframesetubaseubasefontubgsoundulinkumetaunoframesuscriptustyleutitleuheadubr( ubaseubasefontubgsoundulinkumetaunoframesuscriptustyleutitle(ubodyuhtmlubr(RCR RRt startTagBodytstartTagFramesettstartTagFromHeadRRRRRRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC#s     cS`s|jtS(N(RR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu4s cS`s|j|S(N(R(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn8s 
cS`s|jjdj|S(NuinBody(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR<scS`s6t|j_|jj||jjd|j_dS(NuinBody(RNRDR]R:RR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR?s cS`s*|jj||jjd|j_dS(Nu inFrameset(R:RRDR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRDscS`s|jjdi|dd6|jjj|jj|jjdj|xG|jjdddD],}|jdkrh|jjj |PqhqhWdS(Nu#unexpected-start-tag-out-of-my-headunameuinHeadiuhead( RDRiR:RhRtRR>RpRAtremove(R?R~R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRHs  cS`s"|jjdi|dd6dS(Nuunexpected-start-taguname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRRscS`s|j|S(N(R(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRUs cS`s|j|S(N(R(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRYs cS`s"|jjdi|dd6dS(Nuunexpected-end-taguname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR]scS`s?|jjtdd|jjd|j_t|j_dS(NubodyuStartTaguinBody(R:RRRDR>RWR\R](R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR`s(R5R6RCRuRnRRRRRRRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR"s         t InBodyPhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d Zd ZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZ dZ!d Z"d!Z#d"Z$d#Z%d$Z&d%Z'd&Z(d'Z)d(Z*d)Z+d*Z,d+Z-d,Z.d-Z/d.Z0d/Z1d0Z2d1Z3d2Z4RS(3c,`sij||||j|_tjd|jfdd|jfd |jfd |jfde|j ft |j fdf|j fd&|j fdg|jfd*|jfd+|jfdh|jfd8|jfd9|jfdi|jfd=|jfd>|jfdj|jfdk|jfdH|jfdI|jfdJ|jfdK|jfdL|jfdM|jfdN|jfdl|j fdQ|j!fdm|j"fdn|j#fdV|j$fdW|j%fdo|j&fg!|_'|j(|j'_)tjd |j*fd|j+fdp|j,fd&|j-fd |j.fdq|j/ft |j0fdr|j1fds|j2fd@|j3fg |_4|j5|j4_)dS(tNuhtmlubaseubasefontubgsounducommandulinkumetauscriptustyleutitleubodyuframesetuaddressuarticleuasideu blockquoteucenterudetailsudirudivudlufieldsetu figcaptionufigureufooteruheaderuhgroupumainumenuunavuolupusectionusummaryuulupreulistinguformuliuddudtu 
plaintextuaububigucodeuemufontuiususmallustrikeustronguttuuunobrubuttonuappletumarqueeuobjectuxmputableuareaubruembeduimgukeygenuwbruparamusourceutrackuinputuhruimageuisindexutextareauiframeunoscriptunoembedunoframesuselecturpurtuoptionuoptgroupumathusvgucaptionucolucolgroupuframeuheadutbodyutdutfootuthutheadutrudialog( ubaseubasefontubgsounducommandulinkumetauscriptustyleutitle(uaddressuarticleuasideu blockquoteucenterudetailsudirudivudlufieldsetu figcaptionufigureufooteruheaderuhgroupumainumenuunavuolupusectionusummaryuul(upreulisting(uliuddudt( ububigucodeuemufontuiususmallustrikeustronguttuu(uappletumarqueeuobject(uareaubruembeduimgukeygenuwbr(uparamusourceutrack(unoembedunoframes(urpurt(uoptionuoptgroup( ucaptionucolucolgroupuframeuheadutbodyutdutfootuthutheadutr(uaddressuarticleuasideu blockquoteubuttonucenterudetailsudialogudirudivudlufieldsetu figcaptionufigureufooteruheaderuhgroupulistingumainumenuunavuolupreusectionusummaryuul(uddudtuli(uaububigucodeuemufontuiunobrususmallustrikeustronguttuu(uappletumarqueeuobject(6RCtprocessSpaceCharactersNonPreRoR RRtstartTagProcessInHeadRRtstartTagClosePRtstartTagHeadingtstartTagPreListingt startTagFormtstartTagListItemtstartTagPlaintextt startTagAtstartTagFormattingt startTagNobrtstartTagButtontstartTagAppletMarqueeObjectt startTagXmpt startTagTabletstartTagVoidFormattingtstartTagParamSourcet startTagInputt startTagHrt startTagImagetstartTagIsIndextstartTagTextareatstartTagIFrameRtstartTagRawtexttstartTagSelectt startTagRpRtt startTagOptt startTagMatht startTagSvgtstartTagMisplacedRRRt endTagBodyt endTagHtmlt endTagBlockt endTagFormtendTagPtendTagListItemt endTagHeadingtendTagFormattingtendTagAppletMarqueeObjectRRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRChs~                                          
cS`s4|j|jko3|j|jko3|j|jkS(N(RARaRb(R?tnode1tnode2((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytisMatchingFormattingElementscS`s|jj||jjd}g}xS|jjdddD]8}|tkrVPq@|j||r@|j|q@q@Wt|dkstt|dkr|jjj |dn|jjj|dS(Nii( R:RRhtactiveFormattingElementsR RRtRkRvR(R?R~RdtmatchingElementsR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytaddFormattingElements  c S`sWtd}xD|jjddd D])}|j|kr&|jjdPq&q&WdS(Nuddudtuliuputbodyutdutfootuthutheadutrubodyuhtmliu expected-closing-tag-but-got-eof( uddudtuliuputbodyutdutfootuthutheadutrubodyuhtml(RmR:RhRARDRi(R?tallowed_elementsR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRus  cS`s|d}|j|_|jdrb|jjdjdkrb|jjdj rb|d}n|r|jj|jj|ndS( Nudatau iupreulistingutextareai(upreulistingutextarea( RRoRR:RhRAt hasContentt#reconstructActiveFormattingElementsR(R?R~R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyt!processSpaceCharactersDropNewlines    cS`s}|ddkrdS|jj|jj|d|jjrytg|dD]}|tk^qOryt|j_ndS(Nudatau(R:R RRDR]tanyR RN(R?R~tchar((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRns  #cS`s%|jj|jj|ddS(Nudata(R:R R(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`s|jjdj|S(NuinHead(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|jjdidd6t|jjdksK|jjdjdkr`|jjstn`t|j_ xQ|dj D]?\}}||jjdj kr}||jjdj |RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs1 'cS`sB|jjdddr.|jtdn|jj|dS(Nuptvariantubutton(R:telementInScopeRRR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR scS`sZ|jjdddr.|jtdn|jj|t|j_|j|_ dS(NupRubutton( R:RRRRRNRDR]R Ro(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs  cS`s|jjr)|jjdidd6nT|jjdddrW|jtdn|jj||jjd|j_dS(Nuunexpected-start-taguformunameupRubuttoni( R:t formPointerRDRiRRRRRh(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs  
cS`st|j_idgd6ddgd6ddgd6}||d}xnt|jjD]Z}|j|kr|jjjt |jdPn|j t krW|jd krWPqWqWW|jj dd d r|jjjt ddn|jj |dS( NuliudtuddunameuEndTaguaddressudivupRubutton(uaddressudivup(RNRDR]treversedR:RhRARWRqRt nameTupleRRR(R?R~t stopNamesMapt stopNamesR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs"     cS`sZ|jjdddr.|jtdn|jj||jjj|jj_dS(NupRubutton( R:RRRRRDRHRVRT(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR4scS`s|jjdddr.|jtdn|jjdjtkrx|jjdi|dd6|jjj n|jj |dS(NupRubuttoniuunexpected-start-taguname( R:RRRRhRARRDRiRR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR:s cS`s|jjd}|r|jjdidd6dd6|jtd||jjkrt|jjj|n||jjkr|jjj|qn|jj |j |dS(Nuau$unexpected-start-tag-implies-end-tagu startNameuendName( R:t!elementInActiveFormattingElementsRDRiRRRhRRR R (R?R~t afeAElement((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRBs  cS`s|jj|j|dS(N(R:R R (R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyROs cS`st|jj|jjdrc|jjdidd6dd6|jtd|jjn|j|dS(Nunobru$unexpected-start-tag-implies-end-tagu startNameuendName(R:R RRDRiRqRR (R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRSs  cS`sw|jjdrJ|jjdidd6dd6|jtd|S|jj|jj|t|j_ dS(Nubuttonu$unexpected-start-tag-implies-end-tagu startNameuendName( R:RRDRiRqRR RRNR](R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR]s  cS`s@|jj|jj||jjjtt|j_dS(N( R:R RRRtR RNRDR](R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRhs cS`s^|jjdddr.|jtdn|jjt|j_|jj|ddS(NupRubuttonuRAWTEXT( R:RRRR RNRDR]R(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRns   cS`sy|jjdkrC|jjdddrC|jtdqCn|jj|t|j_|jj d|j_ dS(NuquirksupRubuttonuinTable( RDRQR:RRqRRRNR]R>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRus  cS`sG|jj|jj||jjjt|d(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs      
cS`s_|jjdrK|jj|jjdjdkrK|jjqKn|jj|dS(Nurubyi(R:RtgenerateImpliedEndTagsRhRARDRiR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs  cS`sv|jj|jj||jj|td|d<|jj||drr|jjjt |dRW(R?R~R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR!s     cS`s-|jjdr)|jtd|SdS(Nubody(R:RRR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR3scS`s|ddkr|j|_n|jj|d}|rK|jjn|jjdj|dkr|jjdi|dd6n|r|jjj }x,|j|dkr|jjj }qWndS(Nunameupreiuend-tag-too-early( RRoR:RRRhRARDRiR(R?R~tinScopeR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR9s!cS`s|jj}d|j_|dks7|jj| rT|jjdidd6nS|jj|jjd|kr|jjdidd6n|jjj|dS(Nuunexpected-end-taguformunameiuend-tag-too-early-ignored( R:RR9RRDRiRRhR(R?R~R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRGs     cS`s|ddkrd}nd}|jj|dd|s\|jjdi|dd6n|jjd|d|jjdj|dkr|jjdi|dd6n|jjj}x)|j|dkr|jjj}qWdS( NunameuliulistRuunexpected-end-tagtexcludeiuend-tag-too-early( R9R:RRDRiRRhRAR(R?R~RR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRTs ! cS`sx1tD])}|jj|r|jjPqqW|jjdj|dkrr|jjdi|dd6nx^tD]V}|jj|ry|jjj}x%|jtkr|jjj}qWPqyqyWdS(Niunameuend-tag-too-early( RR:RRRhRARDRiR(R?R~titem((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRes  ! 
cS`s3d}x&|dkr.|d7}|jj|d}| sd||jjkru|jj|j ru|j|dS||jjkr|jjdi|dd6|jjj |dS|jj|js|jjdi|dd6dS||jjdkr*|jjd i|dd6n|jjj |}d}x1|jj|D]}|j t krV|}PqVqVW|dkr|jjj}x"||kr|jjj}qW|jjj |dS|jj|d}|jjj |}|} } d} |jjj | } x| d kr9| d7} | d8} |jj| } | |jjkr|jjj | q$n| |krPn| |kr|jjj | d}n| j} | |jj|jjj | <| |jj|jjj | <| } | jr#| jj| n| j| | } q$W| jrV| jj| n|jtdkr|jj\}}|j| |n |j| |j} |j| |j| |jjj ||jjj|| |jjj ||jjj|jjj |d| q WdS(u)The much-feared adoption agency algorithmiiiunameNuadoption-agency-1.2uadoption-agency-4.4iuadoption-agency-1.3iutableutbodyutfootutheadutr(utableutbodyutfootutheadutr(R:RRhRRARRDRiRRtindexR9RRRt cloneNodeRRt appendChildRmtgetTableMisnestedNodePositiont insertBeforetreparentChildrentinsert(R?R~touterLoopCountertformattingElementtafeIndext furthestBlockRdtcommonAncestortbookmarktlastNodeRtinnerLoopCounterR#tcloneRR'((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRts   !                cS`s|jj|dr&|jjn|jjdj|dkrd|jjdi|dd6n|jj|dr|jjj}x)|j|dkr|jjj}qW|jjndS(Nunameiuend-tag-too-early( R:RRRhRARDRiRtclearActiveFormattingElements(R?R~Rd((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs!cS`s[|jjdidd6dd6|jj|jjtdd|jjjdS(Nuunexpected-end-tag-treated-asubru originalNameu br elementunewNameuStartTag(RDRiR:R RRRhR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR#s   cS`sx|jjdddD]}|j|dkr|jjd|d|jjdj|dkr|jjdi|dd6nx|jjj|krqWPq|jtkr|jjdi|dd6PqqWdS(NiunameR!uunexpected-end-tag( R:RhRARRDRiRRR(R?R~R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR*s !(5R5R6RCRR RuR RnRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyResfG                                  t TextPhasec`sDeZfdZdZdZdZdZdZRS(c`sej|||tjg|_|j|j_tjd|jfg|_|j|j_dS(Nuscript( RCR RRRRt endTagScriptRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC9s 
cS`s|jj|ddS(Nudata(R:R(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnAscS`sM|jjdi|jjdjd6|jjj|jj|j_tS(Nu&expected-named-closing-tag-but-got-eofiuname( RDRiR:RhRARRRWR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRuDs  cS`ststd|ddS(Nu4Tried to process start tag %s in RCDATA/RAWTEXT modeuname(RNRv(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRKscS`s=|jjj}|jdks't|jj|j_dS(Nuscript(R:RhRRARvRDRRW(R?R~R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR5NscS`s&|jjj|jj|j_dS(N(R:RhRRDRRW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRUs(R5R6RCRnRuRR5R((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR48s     t InTablePhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d Zd ZdZdZdZdZdZRS(c `sj|||tjd|jfd|jfd|jfd|jfd|jfd|jfd |j fd|j fd|j fd|j fg |_ |j|j _tjd |jfd|jfg|_|j|j_dS(Nuhtmlucaptionucolgroupucolutbodyutfootutheadutduthutrutableustyleuscriptuinputuformubody(utbodyutfootuthead(utduthutr(ustyleuscript( ubodyucaptionucolucolgroupuhtmlutbodyutdutfootuthutheadutr(RCR RRtstartTagCaptiontstartTagColgroupt startTagColtstartTagRowGrouptstartTagImplyTbodyRtstartTagStyleScriptRRRRRt endTagTablet endTagIgnoreRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC[s$          cS`s4x-|jjdjdkr/|jjjqWdS(Niutableuhtml(utableuhtml(R:RhRAR(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytclearStackToTableContextsscS`sB|jjdjdkr,|jjdn|jjs>tdS(Niuhtmlu eof-in-table(R:RhRARDRiRLRv(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu|scS`sH|jj}|jjd|j_||jj_|jjj|dS(Nu inTableText(RDRWR>RRo(R?R~R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRos cS`sH|jj}|jjd|j_||jj_|jjj|dS(Nu inTableText(RDRWR>RRn(R?R~R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRns 
cS`s3t|j_|jjdj|t|j_dS(NuinBody(R\R:tinsertFromTableRDR>RnRN(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`sG|j|jjjt|jj||jjd|j_dS(Nu inCaption( R?R:RRtR RRDR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR7s cS`s4|j|jj||jjd|j_dS(Nu inColumnGroup(R?R:RRDR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR8s cS`s|jtdd|S(NucolgroupuStartTag(R8R(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR9scS`s4|j|jj||jjd|j_dS(Nu inTableBody(R?R:RRDR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR:s cS`s|jtdd|S(NutbodyuStartTag(R:R(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR;scS`sN|jjdidd6dd6|jjjtd|jjsJ|SdS(Nu$unexpected-start-tag-implies-end-tagutableu startNameuendName(RDRiRWRqRRL(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs   cS`s|jjdj|S(NuinHead(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR<scS`sqd|dkr`|ddjtdkr`|jjd|jj||jjjn |j|dS(Nutypeudatauhiddenu unexpected-hidden-input-in-table( RcR RDRiR:RRhRR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`s_|jjd|jjdkr[|jj||jjd|j_|jjjndS(Nuunexpected-form-in-tablei(RDRiR:RR9RRhR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`sQ|jjdi|dd6t|j_|jjdj|t|j_dS(Nu)unexpected-start-tag-implies-table-voodoounameuinBody(RDRiR\R:R@R>RpRN(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`s|jjdddr|jj|jjdjdkro|jjdidd6|jjdjd6nx-|jjdjdkr|jjjqrW|jjj|jjn|jj st |jjdS(NutableRiuend-tag-too-early-namedugotNameu expectedName( R:RRRhRARDRiRRYRLRv(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR=s   
cS`s"|jjdi|dd6dS(Nuunexpected-end-taguname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR>scS`sQ|jjdi|dd6t|j_|jjdj|t|j_dS(Nu'unexpected-end-tag-implies-table-voodoounameuinBody(RDRiR\R:R@R>RqRN(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs (R5R6RCR?RuRoRnRR7R8R9R:R;RR<RRRR=R>R((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR6Ys&               tInTableTextPhasec`sVeZfdZdZdZdZdZdZdZdZ RS(c`s)j|||d|_g|_dS(N(RCR9RtcharacterTokens(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCs cS`sdjg|jD]}|d^q}tg|D]}|tk^q3ritdd6|d6}|jjdj|n|r|jj|ng|_dS(Nuudatau CharactersutypeuinTable( tjoinRBRR RRDR>RR:(R?R"RR~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytflushCharacterss)%cS`s|j|j|j_|S(N(RDRRDRW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRrs cS`s|j|j|j_tS(N(RDRRDRWR\(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRus cS`s(|ddkrdS|jj|dS(Nudatau(RBRt(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnscS`s|jj|dS(N(RBRt(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRoscS`s|j|j|j_|S(N(RDRRDRW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRp s cS`s|j|j|j_|S(N(RDRRDRW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRqs ( R5R6RCRDRrRuRnRoRpRq((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRAs     tInCaptionPhasec`sheZfdZdZdZdZdZdZdZdZ dZ d Z RS( c `sj|||tjd|jfd |jfg|_|j|j_tjd|jfd |j fd|j fg|_ |j |j _dS(Nuhtmlucaptionucolucolgrouputbodyutdutfootuthutheadutrutableubody( ucaptionucolucolgrouputbodyutdutfootuthutheadutr( ubodyucolucolgroupuhtmlutbodyutdutfootuthutheadutr( RCR RRtstartTagTableElementRRRt endTagCaptionR=R>RR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCs   cS`s|jjddd 
S(NucaptionRutable(R:R(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytignoreEndTagCaption+scS`s|jjdjdS(NuinBody(RDR>Ru(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu.scS`s|jjdj|S(NuinBody(RDR>Rn(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn1scS`s@|jj|j}|jjjtd|s<|SdS(Nucaption(RDRiRHRWRqR(R?R~t ignoreEndTag((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRF4s   cS`s|jjdj|S(NuinBody(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR<scS`s|js|jj|jjdjdkrc|jjdidd6|jjdjd6nx-|jjdjdkr|jjjqfW|jjj|jj|jj d|j_ n|jj st |jjdS(Niucaptionu$expected-one-end-tag-but-got-anotherugotNameu expectedNameuinTable( RHR:RRhRARDRiRR3R>RWRLRv(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRG?s     cS`s@|jj|j}|jjjtd|s<|SdS(Nucaption(RDRiRHRWRqR(R?R~RI((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR=Qs   cS`s"|jjdi|dd6dS(Nuunexpected-end-taguname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR>XscS`s|jjdj|S(NuinBody(RDR>Rq(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR[s( R5R6RCRHRuRnRFRRGR=R>R((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyREs        tInColumnGroupPhasec`s_eZfdZdZdZdZdZdZdZdZ dZ RS( c`sj|||tjd|jfd|jfg|_|j|j_tjd|jfd|j fg|_ |j |j _dS(Nuhtmlucolucolgroup( RCR RRR9RRRtendTagColgroupt endTagColRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCas  cS`s|jjdjdkS(Niuhtml(R:RhRA(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytignoreEndTagColgrouppscS`s\|jjdjdkr/|jjs+tdS|j}|jtd|sXt SdS(Niuhtmlucolgroup( R:RhRARDRLRvRMRKRR\(R?RI((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRuss cS`s-|j}|jtd|s)|SdS(Nucolgroup(RMRKR(R?R~RI((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn}s 
cS`s.|jj||jjjt|dRW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRKs  cS`s|jjdidd6dS(Nu no-end-tagucoluname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRLscS`s-|j}|jtd|s)|SdS(Nucolgroup(RMRKR(R?R~RI((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs ( R5R6RCRMRuRnR9RRKRLR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRJ^s     tInTableBodyPhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d ZRS( c `sj|||tjd|jfd|jfd |jfd|jfg|_|j|j_ tjd|j fd |j fd|j fg|_ |j|j _ dS(Nuhtmlutrutduthucaptionucolucolgrouputbodyutfootutheadutableubody(utduth(ucaptionucolucolgrouputbodyutfootuthead(utbodyutfootuthead(ubodyucaptionucolucolgroupuhtmlutduthutr(RCR RRt startTagTrtstartTagTableCelltstartTagTableOtherRRRtendTagTableRowGroupR=R>RR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCs     cS`sbx-|jjdjdkr/|jjjqW|jjdjdkr^|jjs^tndS(Niutbodyutfootutheaduhtml(utbodyutfootutheaduhtml(R:RhRARRDRLRv(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytclearStackToTableBodyContexts  cS`s|jjdjdS(NuinTable(RDR>Ru(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRuscS`s|jjdj|S(NuinTable(RDR>Ro(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRoscS`s|jjdj|S(NuinTable(RDR>Rn(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnscS`s4|j|jj||jjd|j_dS(NuinRow(RSR:RRDR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyROs cS`s8|jjdi|dd6|jtdd|S(Nuunexpected-cell-in-table-bodyunameutruStartTag(RDRiROR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRPs cS`s|jjdddsH|jjdddsH|jjdddrv|j|jt|jjdj|S|jjst |jj dS(NutbodyRutableutheadutfooti( R:RRSRRRRhRARDRLRvRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRQs 
cS`s|jjdj|S(NuinTable(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`sq|jj|dddrO|j|jjj|jjd|j_n|jjdi|dd6dS(NunameRutableuinTableu unexpected-end-tag-in-table-body( R:RRSRhRRDR>RWRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRRs   cS`s|jjdddsH|jjdddsH|jjdddrv|j|jt|jjdj|S|jjst |jj dS(NutbodyRutableutheadutfooti( R:RRSRRRRhRARDRLRvRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR=s cS`s"|jjdi|dd6dS(Nu unexpected-end-tag-in-table-bodyuname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR>s cS`s|jjdj|S(NuinTable(RDR>Rq(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs(R5R6RCRSRuRoRnRORPRQRRRR=R>R((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRNs        t InRowPhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d Zd ZRS(c `sj|||tjd|jfd |jfd|jfg|_|j|j_tjd |j fd |j fd|j fd|j fg|_ |j|j _dS(Nuhtmlutduthucaptionucolucolgrouputbodyutfootutheadutrutableubody(utduth(ucaptionucolucolgrouputbodyutfootutheadutr(utbodyutfootuthead(ubodyucaptionucolucolgroupuhtmlutduth(RCR RRRPRQRRRtendTagTrR=RRR>RR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCs     cS`s[xT|jjdjdkrV|jjdi|jjdjd6|jjjqWdS(Niutruhtmlu'unexpected-implied-end-tag-in-table-rowuname(utruhtml(R:RhRARDRiR(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytclearStackToTableRowContexts cS`s|jjddd S(NutrRutable(R:R(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pytignoreEndTagTrscS`s|jjdjdS(NuinTable(RDR>Ru(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu"scS`s|jjdj|S(NuinTable(RDR>Ro(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRo%scS`s|jjdj|S(NuinTable(RDR>Rn(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn(scS`sG|j|jj||jjd|j_|jjjtdS(NuinCell( RVR:RRDR>RWRRtR 
(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRP+s cS`s-|j}|jtd|s)|SdS(Nutr(RWRUR(R?R~RI((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRQ1s cS`s|jjdj|S(NuinTable(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR8scS`sb|js?|j|jjj|jjd|j_n|jjsQt |jj dS(Nu inTableBody( RWRVR:RhRRDR>RWRLRvRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRU;s   cS`s-|j}|jtd|s)|SdS(Nutr(RWRUR(R?R~RI((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR=Es cS`sD|jj|dddr3|jtd|S|jjdS(NunameRutableutr(R:RRURRDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRRMscS`s"|jjdi|dd6dS(Nuunexpected-end-tag-in-table-rowuname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR>Ts cS`s|jjdj|S(NuinTable(RDR>Rq(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRXs(R5R6RCRVRWRuRoRnRPRQRRUR=RRR>R((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRTs           t InCellPhasec`sheZfdZdZdZdZdZdZdZdZ dZ d Z RS( c `sj|||tjd|jfd |jfg|_|j|j_tjd|jfd|j fd|j fg|_ |j |j _dS(Nuhtmlucaptionucolucolgrouputbodyutdutfootuthutheadutrubodyutable( ucaptionucolucolgrouputbodyutdutfootuthutheadutr(utduth(ubodyucaptionucolucolgroupuhtml(utableutbodyutfootutheadutr( RCR RRRQRRRtendTagTableCellR>t endTagImplyRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC]s   cS`s`|jjdddr.|jtdn.|jjdddr\|jtdndS(NutdRutableuth(R:RRYR(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyt 
closeCellnscS`s|jjdjdS(NuinBody(RDR>Ru(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRuuscS`s|jjdj|S(NuinBody(RDR>Rn(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnxscS`sa|jjddds0|jjdddr>|j|S|jjsPt|jjdS(NutdRutableuth(R:RR[RDRLRvRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRQ{s  cS`s|jjdj|S(NuinBody(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s|jj|dddr|jj|d|jjdj|dkr|jjdi|dd6xFtr|jjj}|j|dkrnPqnqnWn|jjj|jj |jj d|j_ n|jjdi|dd6dS(NunameRutableiuunexpected-cell-end-taguinRowuunexpected-end-tag( R:RRRhRARDRiR\RR3R>RW(R?R~R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRYs    cS`s"|jjdi|dd6dS(Nuunexpected-end-taguname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR>scS`s;|jj|dddr*|j|S|jjdS(NunameRutable(R:RR[RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRZs cS`s|jjdj|S(NuinBody(RDR>Rq(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs( R5R6RCR[RuRnRQRRYR>RZR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRX[s       t InSelectPhasec`seZfdZdZdZdZdZdZdZdZ dZ d Z d Z d Z d ZRS( c`sj|||tjd|jfd|jfd|jfd|jfd |jfd|jfg|_ |j |j _ tjd|j fd|j fd|jfg|_|j|j_ dS( Nuhtmluoptionuoptgroupuselectuinputukeygenutextareauscript(uinputukeygenutextarea(RCR RRtstartTagOptiontstartTagOptgroupRRRRRRt endTagOptiontendTagOptgroupt endTagSelectRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCs       cS`sB|jjdjdkr,|jjdn|jjs>tdS(Niuhtmlu 
eof-in-select(R:RhRARDRiRLRv(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRuscS`s,|ddkrdS|jj|ddS(Nudatau(R:R(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnscS`s@|jjdjdkr,|jjjn|jj|dS(Niuoption(R:RhRARR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR]scS`sl|jjdjdkr,|jjjn|jjdjdkrX|jjjn|jj|dS(Niuoptionuoptgroup(R:RhRARR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR^s cS`s'|jjd|jtddS(Nuunexpected-select-in-selectuselect(RDRiRaR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`sU|jjd|jjdddr?|jtd|S|jjsQtdS(Nuunexpected-input-in-selectuselectR(RDRiR:RRaRRLRv(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`s|jjdj|S(NuinHead(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRscS`s"|jjdi|dd6dS(Nuunexpected-start-tag-in-selectuname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRs cS`sJ|jjdjdkr,|jjjn|jjdidd6dS(Niuoptionuunexpected-end-tag-in-selectuname(R:RhRARRDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR_s cS`s|jjdjdkrE|jjdjdkrE|jjjn|jjdjdkrq|jjjn|jjdidd6dS(Niuoptioniuoptgroupuunexpected-end-tag-in-selectuname(R:RhRARRDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR`s cS`s|jjdddrb|jjj}x%|jdkrQ|jjj}q-W|jjn|jjstt|jj dS(NuselectR( R:RRhRRARDRYRLRvRi(R?R~R((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRascS`s"|jjdi|dd6dS(Nuunexpected-end-tag-in-selectuname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s (R5R6RCRuRnR]R^RRRRR_R`RaR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR\s          tInSelectInTablePhasec`sMeZfdZdZdZdZdZdZdZRS(c `sqj|||tjd |jfg|_|j|j_tjd |jfg|_|j |j_dS( 
Nucaptionutableutbodyutfootutheadutrutduth(ucaptionutableutbodyutfootutheadutrutduth(ucaptionutableutbodyutfootutheadutrutduth( RCR RRRRRR=RR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC scS`s|jjdjdS(NuinSelect(RDR>Ru(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu scS`s|jjdj|S(NuinSelect(RDR>Rn(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn scS`s5|jjdi|dd6|jtd|S(Nu5unexpected-table-element-start-tag-in-select-in-tableunameuselect(RDRiRR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR! scS`s|jjdj|S(NuinSelect(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR& scS`sU|jjdi|dd6|jj|dddrQ|jtd|SdS(Nu3unexpected-table-element-end-tag-in-select-in-tableunameRutableuselect(RDRiR:RRR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR=) scS`s|jjdj|S(NuinSelect(RDR>Rq(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR/ s( R5R6RCRuRnRRR=R((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRb s     tInForeignContentPhasec-`seZedddddddddd d d d d ddddddddddddddddddd d!d"d#d$d%d&d'd(d)d*d+g,Zfd,Zd-Zfd.Zd/Zd0ZRS(1ububigu blockquoteubodyubrucenterucodeuddudivudludtuemuembeduh1uh2uh3uh4uh5uh6uheaduhruiuimguliulistingumenuumetaunobruolupupreurubyususmalluspanustrongustrikeusubusuputableuttuuuuluvarc`sj|||dS(N(RC(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC< scS`s+i$dd6dd6dd6dd6d d 6d d 6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d!d"6d#d$6d%d&6d'd(6d)d*6d+d,6d-d.6d/d06d1d26d3d46d5d66d7d86d9d:6d;d<6d=d>6d?d@6dAdB6dCdD6dEdF6dGdH6}|dI|kr'||dI|dIl s(RDR]RRNRn(R?R~(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnh s   cS`s|jjd}|d|jks\|ddkrt|djtdddg@r|jjdi|dd6xm|jjdj|jjkr|jj |jjd r|jj |jjd r|jjj q}W|S|jt d kr|jj |n3|jt d krG|j||jj|n|jj||j|d <|jj||d r|jjj t|d RDRRRvR9RaRlRq(R?R~t 
nodeIndexRR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRq s(!  ( R5R6RmRfRCReRnRpRq((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRc2 s ) tAfterBodyPhasec`sVeZfdZdZdZdZdZdZdZdZ RS(c`sqj|||tjd|jfg|_|j|j_tjd|jfg|_|j |j_dS(Nuhtml( RCR RRRRRRRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC s cS`sdS(N((R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu scS`s!|jj||jjddS(Ni(R:RRh(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRr scS`s*|jjd|jjd|j_|S(Nuunexpected-char-after-bodyuinBody(RDRiR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn scS`s|jjdj|S(NuinBody(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR scS`s8|jjdi|dd6|jjd|j_|S(Nuunexpected-start-tag-after-bodyunameuinBody(RDRiR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s cS`s9|jjr|jjdn|jjd|j_dS(Nu'unexpected-end-tag-after-body-innerhtmluafterAfterBody(RDRLRiR>RW(R?RA((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s cS`s8|jjdi|dd6|jjd|j_|S(Nuunexpected-end-tag-after-bodyunameuinBody(RDRiR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s ( R5R6RCRuRrRnRRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRj s      tInFramesetPhasec`s_eZfdZdZdZdZdZdZdZdZ dZ RS( c`sj|||tjd|jfd|jfd|jfd|jfg|_|j|j_ tjd|j fg|_ |j |j _ dS(Nuhtmluframesetuframeunoframes( RCR RRRt startTagFrametstartTagNoframesRRRtendTagFramesetRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC s   cS`sB|jjdjdkr,|jjdn|jjs>tdS(Niuhtmlueof-in-frameset(R:RhRARDRiRLRv(R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu scS`s|jjddS(Nuunexpected-char-in-frameset(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn 
scS`s|jj|dS(N(R:R(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR scS`s$|jj||jjjdS(N(R:RRhR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRl scS`s|jjdj|S(NuinBody(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRm scS`s"|jjdi|dd6dS(Nu unexpected-start-tag-in-framesetuname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s cS`s|jjdjdkr,|jjdn|jjj|jj r{|jjdjdkr{|jjd|j_ndS(Niuhtmlu)unexpected-frameset-in-frameset-innerhtmluframesetu afterFrameset( R:RhRARDRiRRLR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn s  cS`s"|jjdi|dd6dS(Nuunexpected-end-tag-in-framesetuname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s ( R5R6RCRuRnRRlRmRRnR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRk s       tAfterFramesetPhasec`sMeZfdZdZdZdZdZdZdZRS(c`s}j|||tjd|jfd|jfg|_|j|j_tjd|jfg|_ |j |j _dS(Nuhtmlunoframes( RCR RRRmRRRRRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC s cS`sdS(N((R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu scS`s|jjddS(Nuunexpected-char-after-frameset(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn! scS`s|jjdj|S(NuinHead(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRm$ scS`s"|jjdi|dd6dS(Nu#unexpected-start-tag-after-framesetuname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR' s cS`s|jjd|j_dS(NuafterAfterFrameset(RDR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR+ scS`s"|jjdi|dd6dS(Nu!unexpected-end-tag-after-framesetuname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR. 
s ( R5R6RCRuRnRmRRR((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRo s     tAfterAfterBodyPhasec`sVeZfdZdZdZdZdZdZdZdZ RS(c`sDj|||tjd|jfg|_|j|j_dS(Nuhtml(RCR RRRRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRC3 scS`sdS(N((R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRu; scS`s|jj||jjdS(N(R:RR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRr> scS`s|jjdj|S(NuinBody(RDR>Ro(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRoA scS`s*|jjd|jjd|j_|S(Nuexpected-eof-but-got-charuinBody(RDRiR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnD scS`s|jjdj|S(NuinBody(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRI scS`s8|jjdi|dd6|jjd|j_|S(Nuexpected-eof-but-got-start-tagunameuinBody(RDRiR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRL s cS`s8|jjdi|dd6|jjd|j_|S(Nuexpected-eof-but-got-end-tagunameuinBody(RDRiR>RW(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRqR s ( R5R6RCRuRrRoRnRRRq((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRp2 s      tAfterAfterFramesetPhasec`s_eZfdZdZdZdZdZdZdZdZ dZ RS( c`sPj|||tjd|jfd|jfg|_|j|j_dS(Nuhtmlunoframes(RCR RRtstartTagNoFramesRRR(R?RDR:(R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRCY s  cS`sdS(N((R?((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRub scS`s|jj||jjdS(N(R:RR(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRre scS`s|jjdj|S(NuinBody(RDR>Ro(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRoh scS`s|jjddS(Nuexpected-eof-but-got-char(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRnk scS`s|jjdj|S(NuinBody(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRn 
scS`s|jjdj|S(NuinHead(RDR>Rp(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRrq scS`s"|jjdi|dd6dS(Nuexpected-eof-but-got-start-taguname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRt s cS`s"|jjdi|dd6dS(Nuexpected-eof-but-got-end-taguname(RDRi(R?R~((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRqx s ( R5R6RCRuRrRoRnRRrRRq((R(sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyRqX s       uinitialu beforeHtmlu beforeHeaduinHeaduinHeadNoscriptu afterHeaduinBodyutextuinTableu inTableTextu inCaptionu inColumnGroupu inTableBodyuinRowuinCelluinSelectuinSelectInTableuinForeignContentu afterBodyu inFramesetu afterFramesetuafterAfterBodyuafterAfterFrameset(R(R@RPRRRRRRRRR4R6RARERJRNRTRXR\RbRcRjRkRoRpRq((RsD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR=_sh  %)#.g@C!-GBbYLd's/9%&%c`s}tstjr,t|dt@}nt|dt@}|rytfd|djD|d s(RR tPY27RRmRR((R~Rdtneeds_adjustment((RdsD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s uEndTagcC`s9|dkri}nit|d6|d6|d6|d6S(Nutypeunameudatau selfClosing(R9R(RAR,RbR((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s  RcB`seZdZRS(uError in parsed document(R5R6R(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyR s(4t __future__RRRtpip._vendor.sixRRRR*t collectionsRt ImportErrortpip._vendor.ordereddicttRRR ttreebuilders.baseR R t constantsR R RRRRRRRRRRRRRRRR\RR&R7tobjectRtmemoizeR=RR9RNRt ExceptionR(((sD/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyts>  j  (C  PK.e[eNJJhtml5lib/__init__.pyonu[ abc@`sdZddlmZmZmZddlmZmZmZddl m Z ddl m Z ddl mZdd d d d d gZdZdS(uM HTML parsing library based on the WHATWG "HTML5" specification. The parser is designed to be compatible with existing HTML found in the wild and implements well-defined error recovery that is largely compatible with modern desktop web browsers. 
Example usage: import html5lib f = open("my_document.html") tree = html5lib.parse(f) i(tabsolute_importtdivisiontunicode_literalsi(t HTMLParsertparset parseFragment(tgetTreeBuilder(t getTreeWalker(t serializeu HTMLParseruparseu parseFragmentugetTreeBuilderu getTreeWalkeru serializeu1.0b10N(t__doc__t __future__RRRt html5parserRRRt treebuildersRt treewalkersRt serializerRt__all__t __version__(((sA/usr/lib/python2.7/site-packages/pip/_vendor/html5lib/__init__.pyt s  PK.e[cwܲhtml5lib/html5parser.pynu[from __future__ import absolute_import, division, unicode_literals from pip._vendor.six import with_metaclass, viewkeys, PY3 import types try: from collections import OrderedDict except ImportError: from pip._vendor.ordereddict import OrderedDict from . import _inputstream from . import _tokenizer from . import treebuilders from .treebuilders.base import Marker from . import _utils from .constants import ( spaceCharacters, asciiUpper2Lower, specialElements, headingElements, cdataElements, rcdataElements, tokenTypes, tagTokenTypes, namespaces, htmlIntegrationPointElements, mathmlTextIntegrationPointElements, adjustForeignAttributes as adjustForeignAttributesMap, adjustMathMLAttributes, adjustSVGAttributes, E, ReparseException ) def parse(doc, treebuilder="etree", namespaceHTMLElements=True, **kwargs): """Parse a string or file-like object into a tree""" tb = treebuilders.getTreeBuilder(treebuilder) p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) return p.parse(doc, **kwargs) def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs): tb = treebuilders.getTreeBuilder(treebuilder) p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) return p.parseFragment(doc, container=container, **kwargs) def method_decorator_metaclass(function): class Decorated(type): def __new__(meta, classname, bases, classDict): for attributeName, attribute in classDict.items(): if isinstance(attribute, types.FunctionType): attribute = 
function(attribute) classDict[attributeName] = attribute return type.__new__(meta, classname, bases, classDict) return Decorated class HTMLParser(object): """HTML parser. Generates a tree structure from a stream of (possibly malformed) HTML""" def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False): """ strict - raise an exception when a parse error is encountered tree - a treebuilder class controlling the type of tree that will be returned. Built in treebuilders can be accessed through html5lib.treebuilders.getTreeBuilder(treeType) """ # Raise an exception on the first error encountered self.strict = strict if tree is None: tree = treebuilders.getTreeBuilder("etree") self.tree = tree(namespaceHTMLElements) self.errors = [] self.phases = dict([(name, cls(self, self.tree)) for name, cls in getPhases(debug).items()]) def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs): self.innerHTMLMode = innerHTML self.container = container self.scripting = scripting self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs) self.reset() try: self.mainLoop() except ReparseException: self.reset() self.mainLoop() def reset(self): self.tree.reset() self.firstStartTag = False self.errors = [] self.log = [] # only used with debug mode # "quirks" / "limited quirks" / "no quirks" self.compatMode = "no quirks" if self.innerHTMLMode: self.innerHTML = self.container.lower() if self.innerHTML in cdataElements: self.tokenizer.state = self.tokenizer.rcdataState elif self.innerHTML in rcdataElements: self.tokenizer.state = self.tokenizer.rawtextState elif self.innerHTML == 'plaintext': self.tokenizer.state = self.tokenizer.plaintextState else: # state already is data state # self.tokenizer.state = self.tokenizer.dataState pass self.phase = self.phases["beforeHtml"] self.phase.insertHtmlElement() self.resetInsertionMode() else: self.innerHTML = False # pylint:disable=redefined-variable-type self.phase = 
self.phases["initial"] self.lastPhase = None self.beforeRCDataPhase = None self.framesetOK = True @property def documentEncoding(self): """The name of the character encoding that was used to decode the input stream, or :obj:`None` if that is not determined yet. """ if not hasattr(self, 'tokenizer'): return None return self.tokenizer.stream.charEncoding[0].name def isHTMLIntegrationPoint(self, element): if (element.name == "annotation-xml" and element.namespace == namespaces["mathml"]): return ("encoding" in element.attributes and element.attributes["encoding"].translate( asciiUpper2Lower) in ("text/html", "application/xhtml+xml")) else: return (element.namespace, element.name) in htmlIntegrationPointElements def isMathMLTextIntegrationPoint(self, element): return (element.namespace, element.name) in mathmlTextIntegrationPointElements def mainLoop(self): CharactersToken = tokenTypes["Characters"] SpaceCharactersToken = tokenTypes["SpaceCharacters"] StartTagToken = tokenTypes["StartTag"] EndTagToken = tokenTypes["EndTag"] CommentToken = tokenTypes["Comment"] DoctypeToken = tokenTypes["Doctype"] ParseErrorToken = tokenTypes["ParseError"] for token in self.normalizedTokens(): prev_token = None new_token = token while new_token is not None: prev_token = new_token currentNode = self.tree.openElements[-1] if self.tree.openElements else None currentNodeNamespace = currentNode.namespace if currentNode else None currentNodeName = currentNode.name if currentNode else None type = new_token["type"] if type == ParseErrorToken: self.parseError(new_token["data"], new_token.get("datavars", {})) new_token = None else: if (len(self.tree.openElements) == 0 or currentNodeNamespace == self.tree.defaultNamespace or (self.isMathMLTextIntegrationPoint(currentNode) and ((type == StartTagToken and token["name"] not in frozenset(["mglyph", "malignmark"])) or type in (CharactersToken, SpaceCharactersToken))) or (currentNodeNamespace == namespaces["mathml"] and currentNodeName == 
"annotation-xml" and type == StartTagToken and token["name"] == "svg") or (self.isHTMLIntegrationPoint(currentNode) and type in (StartTagToken, CharactersToken, SpaceCharactersToken))): phase = self.phase else: phase = self.phases["inForeignContent"] if type == CharactersToken: new_token = phase.processCharacters(new_token) elif type == SpaceCharactersToken: new_token = phase.processSpaceCharacters(new_token) elif type == StartTagToken: new_token = phase.processStartTag(new_token) elif type == EndTagToken: new_token = phase.processEndTag(new_token) elif type == CommentToken: new_token = phase.processComment(new_token) elif type == DoctypeToken: new_token = phase.processDoctype(new_token) if (type == StartTagToken and prev_token["selfClosing"] and not prev_token["selfClosingAcknowledged"]): self.parseError("non-void-element-with-trailing-solidus", {"name": prev_token["name"]}) # When the loop finishes it's EOF reprocess = True phases = [] while reprocess: phases.append(self.phase) reprocess = self.phase.processEOF() if reprocess: assert self.phase not in phases def normalizedTokens(self): for token in self.tokenizer: yield self.normalizeToken(token) def parse(self, stream, *args, **kwargs): """Parse a HTML document into a well-formed tree stream - a filelike object or string containing the HTML to be parsed The optional encoding parameter must be a string that indicates the encoding. 
If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) scripting - treat noscript elements as if javascript was turned on """ self._parse(stream, False, None, *args, **kwargs) return self.tree.getDocument() def parseFragment(self, stream, *args, **kwargs): """Parse a HTML fragment into a well-formed tree fragment container - name of the element we're setting the innerHTML property if set to None, default to 'div' stream - a filelike object or string containing the HTML to be parsed The optional encoding parameter must be a string that indicates the encoding. If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) scripting - treat noscript elements as if javascript was turned on """ self._parse(stream, True, *args, **kwargs) return self.tree.getFragment() def parseError(self, errorcode="XXX-undefined-error", datavars=None): # XXX The idea is to make errorcode mandatory. if datavars is None: datavars = {} self.errors.append((self.tokenizer.stream.position(), errorcode, datavars)) if self.strict: raise ParseError(E[errorcode] % datavars) def normalizeToken(self, token): """ HTML5 specific normalizations to the token stream """ if token["type"] == tokenTypes["StartTag"]: raw = token["data"] token["data"] = OrderedDict(raw) if len(raw) > len(token["data"]): # we had some duplicated attribute, fix so first wins token["data"].update(raw[::-1]) return token def adjustMathMLAttributes(self, token): adjust_attributes(token, adjustMathMLAttributes) def adjustSVGAttributes(self, token): adjust_attributes(token, adjustSVGAttributes) def adjustForeignAttributes(self, token): adjust_attributes(token, adjustForeignAttributesMap) def reparseTokenNormal(self, token): # pylint:disable=unused-argument self.parser.phase() def resetInsertionMode(self): # The name of this method is mostly historical. (It's also used in the # specification.) 
last = False newModes = { "select": "inSelect", "td": "inCell", "th": "inCell", "tr": "inRow", "tbody": "inTableBody", "thead": "inTableBody", "tfoot": "inTableBody", "caption": "inCaption", "colgroup": "inColumnGroup", "table": "inTable", "head": "inBody", "body": "inBody", "frameset": "inFrameset", "html": "beforeHead" } for node in self.tree.openElements[::-1]: nodeName = node.name new_phase = None if node == self.tree.openElements[0]: assert self.innerHTML last = True nodeName = self.innerHTML # Check for conditions that should only happen in the innerHTML # case if nodeName in ("select", "colgroup", "head", "html"): assert self.innerHTML if not last and node.namespace != self.tree.defaultNamespace: continue if nodeName in newModes: new_phase = self.phases[newModes[nodeName]] break elif last: new_phase = self.phases["inBody"] break self.phase = new_phase def parseRCDataRawtext(self, token, contentType): """Generic RCDATA/RAWTEXT Parsing algorithm contentType - RCDATA or RAWTEXT """ assert contentType in ("RAWTEXT", "RCDATA") self.tree.insertElement(token) if contentType == "RAWTEXT": self.tokenizer.state = self.tokenizer.rawtextState else: self.tokenizer.state = self.tokenizer.rcdataState self.originalPhase = self.phase self.phase = self.phases["text"] @_utils.memoize def getPhases(debug): def log(function): """Logger that records which phase processes each token""" type_names = dict((value, key) for key, value in tokenTypes.items()) def wrapped(self, *args, **kwargs): if function.__name__.startswith("process") and len(args) > 0: token = args[0] try: info = {"type": type_names[token['type']]} except: raise if token['type'] in tagTokenTypes: info["name"] = token['name'] self.parser.log.append((self.parser.tokenizer.state.__name__, self.parser.phase.__class__.__name__, self.__class__.__name__, function.__name__, info)) return function(self, *args, **kwargs) else: return function(self, *args, **kwargs) return wrapped def getMetaclass(use_metaclass, 
metaclass_func): if use_metaclass: return method_decorator_metaclass(metaclass_func) else: return type # pylint:disable=unused-argument class Phase(with_metaclass(getMetaclass(debug, log))): """Base class for helper object that implements each phase of processing """ def __init__(self, parser, tree): self.parser = parser self.tree = tree def processEOF(self): raise NotImplementedError def processComment(self, token): # For most phases the following is correct. Where it's not it will be # overridden. self.tree.insertComment(token, self.tree.openElements[-1]) def processDoctype(self, token): self.parser.parseError("unexpected-doctype") def processCharacters(self, token): self.tree.insertText(token["data"]) def processSpaceCharacters(self, token): self.tree.insertText(token["data"]) def processStartTag(self, token): return self.startTagHandler[token["name"]](token) def startTagHtml(self, token): if not self.parser.firstStartTag and token["name"] == "html": self.parser.parseError("non-html-root") # XXX Need a check here to see if the first start tag token emitted is # this token... If it's not, invoke self.parser.parseError(). 
for attr, value in token["data"].items(): if attr not in self.tree.openElements[0].attributes: self.tree.openElements[0].attributes[attr] = value self.parser.firstStartTag = False def processEndTag(self, token): return self.endTagHandler[token["name"]](token) class InitialPhase(Phase): def processSpaceCharacters(self, token): pass def processComment(self, token): self.tree.insertComment(token, self.tree.document) def processDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] correct = token["correct"] if (name != "html" or publicId is not None or systemId is not None and systemId != "about:legacy-compat"): self.parser.parseError("unknown-doctype") if publicId is None: publicId = "" self.tree.insertDoctype(token) if publicId != "": publicId = publicId.translate(asciiUpper2Lower) if (not correct or token["name"] != "html" or publicId.startswith( ("+//silmaril//dtd html pro v0r11 19970101//", "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", "-//as//dtd html 3.0 aswedit + extensions//", "-//ietf//dtd html 2.0 level 1//", "-//ietf//dtd html 2.0 level 2//", "-//ietf//dtd html 2.0 strict level 1//", "-//ietf//dtd html 2.0 strict level 2//", "-//ietf//dtd html 2.0 strict//", "-//ietf//dtd html 2.0//", "-//ietf//dtd html 2.1e//", "-//ietf//dtd html 3.0//", "-//ietf//dtd html 3.2 final//", "-//ietf//dtd html 3.2//", "-//ietf//dtd html 3//", "-//ietf//dtd html level 0//", "-//ietf//dtd html level 1//", "-//ietf//dtd html level 2//", "-//ietf//dtd html level 3//", "-//ietf//dtd html strict level 0//", "-//ietf//dtd html strict level 1//", "-//ietf//dtd html strict level 2//", "-//ietf//dtd html strict level 3//", "-//ietf//dtd html strict//", "-//ietf//dtd html//", "-//metrius//dtd metrius presentational//", "-//microsoft//dtd internet explorer 2.0 html strict//", "-//microsoft//dtd internet explorer 2.0 html//", "-//microsoft//dtd internet explorer 2.0 tables//", "-//microsoft//dtd internet explorer 3.0 html strict//", 
"-//microsoft//dtd internet explorer 3.0 html//", "-//microsoft//dtd internet explorer 3.0 tables//", "-//netscape comm. corp.//dtd html//", "-//netscape comm. corp.//dtd strict html//", "-//o'reilly and associates//dtd html 2.0//", "-//o'reilly and associates//dtd html extended 1.0//", "-//o'reilly and associates//dtd html extended relaxed 1.0//", "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", "-//spyglass//dtd html 2.0 extended//", "-//sq//dtd html 2.0 hotmetal + extensions//", "-//sun microsystems corp.//dtd hotjava html//", "-//sun microsystems corp.//dtd hotjava strict html//", "-//w3c//dtd html 3 1995-03-24//", "-//w3c//dtd html 3.2 draft//", "-//w3c//dtd html 3.2 final//", "-//w3c//dtd html 3.2//", "-//w3c//dtd html 3.2s draft//", "-//w3c//dtd html 4.0 frameset//", "-//w3c//dtd html 4.0 transitional//", "-//w3c//dtd html experimental 19960712//", "-//w3c//dtd html experimental 970421//", "-//w3c//dtd w3 html//", "-//w3o//dtd w3 html 3.0//", "-//webtechs//dtd mozilla html 2.0//", "-//webtechs//dtd mozilla html//")) or publicId in ("-//w3o//dtd w3 html strict 3.0//en//", "-/w3c/dtd html 4.0 transitional/en", "html") or publicId.startswith( ("-//w3c//dtd html 4.01 frameset//", "-//w3c//dtd html 4.01 transitional//")) and systemId is None or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"): self.parser.compatMode = "quirks" elif (publicId.startswith( ("-//w3c//dtd xhtml 1.0 frameset//", "-//w3c//dtd xhtml 1.0 transitional//")) or publicId.startswith( ("-//w3c//dtd html 4.01 frameset//", "-//w3c//dtd html 4.01 transitional//")) and systemId is not None): self.parser.compatMode = "limited quirks" self.parser.phase = self.parser.phases["beforeHtml"] def anythingElse(self): self.parser.compatMode = "quirks" self.parser.phase = self.parser.phases["beforeHtml"] def processCharacters(self, token): 
self.parser.parseError("expected-doctype-but-got-chars") self.anythingElse() return token def processStartTag(self, token): self.parser.parseError("expected-doctype-but-got-start-tag", {"name": token["name"]}) self.anythingElse() return token def processEndTag(self, token): self.parser.parseError("expected-doctype-but-got-end-tag", {"name": token["name"]}) self.anythingElse() return token def processEOF(self): self.parser.parseError("expected-doctype-but-got-eof") self.anythingElse() return True class BeforeHtmlPhase(Phase): # helper methods def insertHtmlElement(self): self.tree.insertRoot(impliedTagToken("html", "StartTag")) self.parser.phase = self.parser.phases["beforeHead"] # other def processEOF(self): self.insertHtmlElement() return True def processComment(self, token): self.tree.insertComment(token, self.tree.document) def processSpaceCharacters(self, token): pass def processCharacters(self, token): self.insertHtmlElement() return token def processStartTag(self, token): if token["name"] == "html": self.parser.firstStartTag = True self.insertHtmlElement() return token def processEndTag(self, token): if token["name"] not in ("head", "body", "html", "br"): self.parser.parseError("unexpected-end-tag-before-html", {"name": token["name"]}) else: self.insertHtmlElement() return token class BeforeHeadPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = _utils.MethodDispatcher([ ("html", self.startTagHtml), ("head", self.startTagHead) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = _utils.MethodDispatcher([ (("head", "body", "html", "br"), self.endTagImplyHead) ]) self.endTagHandler.default = self.endTagOther def processEOF(self): self.startTagHead(impliedTagToken("head", "StartTag")) return True def processSpaceCharacters(self, token): pass def processCharacters(self, token): self.startTagHead(impliedTagToken("head", "StartTag")) return token def startTagHtml(self, token): return 
self.parser.phases["inBody"].processStartTag(token) def startTagHead(self, token): self.tree.insertElement(token) self.tree.headPointer = self.tree.openElements[-1] self.parser.phase = self.parser.phases["inHead"] def startTagOther(self, token): self.startTagHead(impliedTagToken("head", "StartTag")) return token def endTagImplyHead(self, token): self.startTagHead(impliedTagToken("head", "StartTag")) return token def endTagOther(self, token): self.parser.parseError("end-tag-after-implied-root", {"name": token["name"]}) class InHeadPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = _utils.MethodDispatcher([ ("html", self.startTagHtml), ("title", self.startTagTitle), (("noframes", "style"), self.startTagNoFramesStyle), ("noscript", self.startTagNoscript), ("script", self.startTagScript), (("base", "basefont", "bgsound", "command", "link"), self.startTagBaseLinkCommand), ("meta", self.startTagMeta), ("head", self.startTagHead) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = _utils.MethodDispatcher([ ("head", self.endTagHead), (("br", "html", "body"), self.endTagHtmlBodyBr) ]) self.endTagHandler.default = self.endTagOther # the real thing def processEOF(self): self.anythingElse() return True def processCharacters(self, token): self.anythingElse() return token def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagHead(self, token): self.parser.parseError("two-heads-are-not-better-than-one") def startTagBaseLinkCommand(self, token): self.tree.insertElement(token) self.tree.openElements.pop() token["selfClosingAcknowledged"] = True def startTagMeta(self, token): self.tree.insertElement(token) self.tree.openElements.pop() token["selfClosingAcknowledged"] = True attributes = token["data"] if self.parser.tokenizer.stream.charEncoding[1] == "tentative": if "charset" in attributes: self.parser.tokenizer.stream.changeEncoding(attributes["charset"]) 
elif ("content" in attributes and "http-equiv" in attributes and attributes["http-equiv"].lower() == "content-type"): # Encoding it as UTF-8 here is a hack, as really we should pass # the abstract Unicode string, and just use the # ContentAttrParser on that, but using UTF-8 allows all chars # to be encoded and as a ASCII-superset works. data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8")) parser = _inputstream.ContentAttrParser(data) codec = parser.parse() self.parser.tokenizer.stream.changeEncoding(codec) def startTagTitle(self, token): self.parser.parseRCDataRawtext(token, "RCDATA") def startTagNoFramesStyle(self, token): # Need to decide whether to implement the scripting-disabled case self.parser.parseRCDataRawtext(token, "RAWTEXT") def startTagNoscript(self, token): if self.parser.scripting: self.parser.parseRCDataRawtext(token, "RAWTEXT") else: self.tree.insertElement(token) self.parser.phase = self.parser.phases["inHeadNoscript"] def startTagScript(self, token): self.tree.insertElement(token) self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState self.parser.originalPhase = self.parser.phase self.parser.phase = self.parser.phases["text"] def startTagOther(self, token): self.anythingElse() return token def endTagHead(self, token): node = self.parser.tree.openElements.pop() assert node.name == "head", "Expected head got %s" % node.name self.parser.phase = self.parser.phases["afterHead"] def endTagHtmlBodyBr(self, token): self.anythingElse() return token def endTagOther(self, token): self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) def anythingElse(self): self.endTagHead(impliedTagToken("head")) class InHeadNoscriptPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = _utils.MethodDispatcher([ ("html", self.startTagHtml), (("basefont", "bgsound", "link", "meta", "noframes", "style"), self.startTagBaseLinkCommand), (("head", "noscript"), 
self.startTagHeadNoscript), ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = _utils.MethodDispatcher([ ("noscript", self.endTagNoscript), ("br", self.endTagBr), ]) self.endTagHandler.default = self.endTagOther def processEOF(self): self.parser.parseError("eof-in-head-noscript") self.anythingElse() return True def processComment(self, token): return self.parser.phases["inHead"].processComment(token) def processCharacters(self, token): self.parser.parseError("char-in-head-noscript") self.anythingElse() return token def processSpaceCharacters(self, token): return self.parser.phases["inHead"].processSpaceCharacters(token) def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagBaseLinkCommand(self, token): return self.parser.phases["inHead"].processStartTag(token) def startTagHeadNoscript(self, token): self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) def startTagOther(self, token): self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]}) self.anythingElse() return token def endTagNoscript(self, token): node = self.parser.tree.openElements.pop() assert node.name == "noscript", "Expected noscript got %s" % node.name self.parser.phase = self.parser.phases["inHead"] def endTagBr(self, token): self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]}) self.anythingElse() return token def endTagOther(self, token): self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) def anythingElse(self): # Caller must raise parse error first! 
self.endTagNoscript(impliedTagToken("noscript")) class AfterHeadPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = _utils.MethodDispatcher([ ("html", self.startTagHtml), ("body", self.startTagBody), ("frameset", self.startTagFrameset), (("base", "basefont", "bgsound", "link", "meta", "noframes", "script", "style", "title"), self.startTagFromHead), ("head", self.startTagHead) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"), self.endTagHtmlBodyBr)]) self.endTagHandler.default = self.endTagOther def processEOF(self): self.anythingElse() return True def processCharacters(self, token): self.anythingElse() return token def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagBody(self, token): self.parser.framesetOK = False self.tree.insertElement(token) self.parser.phase = self.parser.phases["inBody"] def startTagFrameset(self, token): self.tree.insertElement(token) self.parser.phase = self.parser.phases["inFrameset"] def startTagFromHead(self, token): self.parser.parseError("unexpected-start-tag-out-of-my-head", {"name": token["name"]}) self.tree.openElements.append(self.tree.headPointer) self.parser.phases["inHead"].processStartTag(token) for node in self.tree.openElements[::-1]: if node.name == "head": self.tree.openElements.remove(node) break def startTagHead(self, token): self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) def startTagOther(self, token): self.anythingElse() return token def endTagHtmlBodyBr(self, token): self.anythingElse() return token def endTagOther(self, token): self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) def anythingElse(self): self.tree.insertElement(impliedTagToken("body", "StartTag")) self.parser.phase = self.parser.phases["inBody"] self.parser.framesetOK = True class InBodyPhase(Phase): # 
http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody # the really-really-really-very crazy mode def __init__(self, parser, tree): Phase.__init__(self, parser, tree) # Set this to the default handler self.processSpaceCharacters = self.processSpaceCharactersNonPre self.startTagHandler = _utils.MethodDispatcher([ ("html", self.startTagHtml), (("base", "basefont", "bgsound", "command", "link", "meta", "script", "style", "title"), self.startTagProcessInHead), ("body", self.startTagBody), ("frameset", self.startTagFrameset), (("address", "article", "aside", "blockquote", "center", "details", "dir", "div", "dl", "fieldset", "figcaption", "figure", "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p", "section", "summary", "ul"), self.startTagCloseP), (headingElements, self.startTagHeading), (("pre", "listing"), self.startTagPreListing), ("form", self.startTagForm), (("li", "dd", "dt"), self.startTagListItem), ("plaintext", self.startTagPlaintext), ("a", self.startTagA), (("b", "big", "code", "em", "font", "i", "s", "small", "strike", "strong", "tt", "u"), self.startTagFormatting), ("nobr", self.startTagNobr), ("button", self.startTagButton), (("applet", "marquee", "object"), self.startTagAppletMarqueeObject), ("xmp", self.startTagXmp), ("table", self.startTagTable), (("area", "br", "embed", "img", "keygen", "wbr"), self.startTagVoidFormatting), (("param", "source", "track"), self.startTagParamSource), ("input", self.startTagInput), ("hr", self.startTagHr), ("image", self.startTagImage), ("isindex", self.startTagIsIndex), ("textarea", self.startTagTextarea), ("iframe", self.startTagIFrame), ("noscript", self.startTagNoscript), (("noembed", "noframes"), self.startTagRawtext), ("select", self.startTagSelect), (("rp", "rt"), self.startTagRpRt), (("option", "optgroup"), self.startTagOpt), (("math"), self.startTagMath), (("svg"), self.startTagSvg), (("caption", "col", "colgroup", "frame", "head", "tbody", "td", "tfoot", "th", "thead", "tr"), 
self.startTagMisplaced) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = _utils.MethodDispatcher([ ("body", self.endTagBody), ("html", self.endTagHtml), (("address", "article", "aside", "blockquote", "button", "center", "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure", "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre", "section", "summary", "ul"), self.endTagBlock), ("form", self.endTagForm), ("p", self.endTagP), (("dd", "dt", "li"), self.endTagListItem), (headingElements, self.endTagHeading), (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small", "strike", "strong", "tt", "u"), self.endTagFormatting), (("applet", "marquee", "object"), self.endTagAppletMarqueeObject), ("br", self.endTagBr), ]) self.endTagHandler.default = self.endTagOther def isMatchingFormattingElement(self, node1, node2): return (node1.name == node2.name and node1.namespace == node2.namespace and node1.attributes == node2.attributes) # helper def addFormattingElement(self, token): self.tree.insertElement(token) element = self.tree.openElements[-1] matchingElements = [] for node in self.tree.activeFormattingElements[::-1]: if node is Marker: break elif self.isMatchingFormattingElement(node, element): matchingElements.append(node) assert len(matchingElements) <= 3 if len(matchingElements) == 3: self.tree.activeFormattingElements.remove(matchingElements[-1]) self.tree.activeFormattingElements.append(element) # the real deal def processEOF(self): allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td", "tfoot", "th", "thead", "tr", "body", "html")) for node in self.tree.openElements[::-1]: if node.name not in allowed_elements: self.parser.parseError("expected-closing-tag-but-got-eof") break # Stop parsing def processSpaceCharactersDropNewline(self, token): # Sometimes (start of
, , and