# coding: utf-8
"""
Package resource API
--------------------

A resource is a logical file contained within a package, or a logical
subdirectory thereof.  The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is.  Do not use os.path operations to manipulate resource
names being passed into the API.

The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files.  It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""

from __future__ import absolute_import

import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import errno
import tempfile
import textwrap
import itertools
import inspect
import ntpath
import posixpath
from pkgutil import get_importer

try:
    import _imp
except ImportError:
    # Python 3.2 compatibility
    import imp as _imp

try:
    FileExistsError
except NameError:
    FileExistsError = OSError

from pkg_resources.extern import six
from pkg_resources.extern.six.moves import urllib, map, filter

# capture these to bypass sandboxing
from os import utime
try:
    from os import mkdir, rename, unlink
    WRITE_SUPPORT = True
except ImportError:
    # no write support, probably under GAE
    WRITE_SUPPORT = False

from os import open as os_open
from os.path import isdir, split

try:
    import importlib.machinery as importlib_machinery
    # access attribute to force import under delayed import mechanisms.
    importlib_machinery.__name__
except ImportError:
    importlib_machinery = None

from . import py31compat
from pkg_resources.extern import appdirs
from pkg_resources.extern import packaging
__import__('pkg_resources.extern.packaging.version')
__import__('pkg_resources.extern.packaging.specifiers')
__import__('pkg_resources.extern.packaging.requirements')
__import__('pkg_resources.extern.packaging.markers')


__metaclass__ = type


if (3, 0) < sys.version_info < (3, 4):
    raise RuntimeError("Python 3.4 or later is required")

if six.PY2:
    # Those builtin exceptions are only defined in Python 3
    PermissionError = None
    NotADirectoryError = None

# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
resource_dir = None
resource_stream = None
set_extraction_path = None
resource_isdir = None
resource_string = None
iter_entry_points = None
resource_listdir = None
resource_filename = None
resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None


class PEP440Warning(RuntimeWarning):
    """
    Used when there is an issue with a version or specifier not complying with
    PEP 440.
    """


def parse_version(v):
    try:
        return packaging.version.Version(v)
    except packaging.version.InvalidVersion:
        return packaging.version.LegacyVersion(v)
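

# Behavior sketch (values are illustrative): PEP 440-compliant strings become
# packaging Version objects and compare numerically; anything else falls back
# to LegacyVersion, which sorts before every valid version.  PEP440Warning
# above is the warning category used to flag such non-compliant versions.
#
#     parse_version('1.10') > parse_version('1.9')      # True
#     parse_version('not a version')                     # LegacyVersion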


_state_vars = {}


def _declare_state(vartype, **kw):
    globals().update(kw)
    _state_vars.update(dict.fromkeys(kw, vartype))


def __getstate__():
    state = {}
    g = globals()
    for k, v in _state_vars.items():
        state[k] = g['_sget_' + v](g[k])
    return state


def __setstate__(state):
    g = globals()
    for k, v in state.items():
        g['_sset_' + _state_vars[k]](k, g[k], v)
    return state


def _sget_dict(val):
    return val.copy()


def _sset_dict(key, ob, state):
    ob.clear()
    ob.update(state)


def _sget_object(val):
    return val.__getstate__()


def _sset_object(key, ob, state):
    ob.__setstate__(state)


_sget_none = _sset_none = lambda *args: None


def get_supported_platform():
    """Return this platform's maximum compatible version.

    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils.  But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*.  To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.

    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform()
    m = macosVersionString.match(plat)
    if m is not None and sys.platform == "darwin":
        try:
            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
        except ValueError:
            # not Mac OS X
            pass
    return plat
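

# For example (values are illustrative): on a macOS 10.15 machine where
# sysconfig reports the build target 'macosx-10.9-x86_64', this typically
# yields 'macosx-10.15-x86_64'.  On other platforms the sysconfig value is
# returned unchanged.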


__all__ = [
    # Basic resource access and distribution/entry point discovery
    'require', 'run_script', 'get_provider', 'get_distribution',
    'load_entry_point', 'get_entry_map', 'get_entry_info',
    'iter_entry_points',
    'resource_string', 'resource_stream', 'resource_filename',
    'resource_listdir', 'resource_exists', 'resource_isdir',

    # Environmental control
    'declare_namespace', 'working_set', 'add_activation_listener',
    'find_distributions', 'set_extraction_path', 'cleanup_resources',
    'get_default_cache',

    # Primary implementation classes
    'Environment', 'WorkingSet', 'ResourceManager',
    'Distribution', 'Requirement', 'EntryPoint',

    # Exceptions
    'ResolutionError', 'VersionConflict', 'DistributionNotFound',
    'UnknownExtra', 'ExtractionError',

    # Warnings
    'PEP440Warning',

    # Parsing functions and string utilities
    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',

    # filesystem utilities
    'ensure_directory', 'normalize_path',

    # Distribution "precedence" constants
    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',

    # "Provider" interfaces, implementations, and registration/lookup APIs
    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
    'register_finder', 'register_namespace_handler', 'register_loader_type',
    'fixup_namespace_packages', 'get_importer',

    # Warnings
    'PkgResourcesDeprecationWarning',

    # Deprecated/backward compatibility only
    'run_main', 'AvailableDistributions',
]


class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""

    def __repr__(self):
        return self.__class__.__name__ + repr(self.args)


class VersionConflict(ResolutionError):
    """
    An already-installed version conflicts with the requested version.

    Should be initialized with the installed Distribution and the requested
    Requirement.
    """

    _template = "{self.dist} is installed but {self.req} is required"

    @property
    def dist(self):
        return self.args[0]

    @property
    def req(self):
        return self.args[1]

    def report(self):
        return self._template.format(**locals())

    def with_context(self, required_by):
        """
        If required_by is non-empty, return a version of self that is a
        ContextualVersionConflict.
        """
        if not required_by:
            return self
        args = self.args + (required_by,)
        return ContextualVersionConflict(*args)


class ContextualVersionConflict(VersionConflict):
    """
    A VersionConflict that accepts a third parameter, the set of the
    requirements that required the installed Distribution.
    """

    _template = VersionConflict._template + ' by {self.required_by}'

    @property
    def required_by(self):
        return self.args[2]


class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""

    _template = ("The '{self.req}' distribution was not found "
                 "and is required by {self.requirers_str}")

    @property
    def req(self):
        return self.args[0]

    @property
    def requirers(self):
        return self.args[1]

    @property
    def requirers_str(self):
        if not self.requirers:
            return 'the application'
        return ', '.join(self.requirers)

    def report(self):
        return self._template.format(**locals())

    def __str__(self):
        return self.report()


class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""


_provider_factories = {}

PY_MAJOR = '{}.{}'.format(*sys.version_info)
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1


def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    _provider_factories[loader_type] = provider_factory


def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    try:
        module = sys.modules[moduleOrReq]
    except KeyError:
        __import__(moduleOrReq)
        module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    return _find_adapter(_provider_factories, loader)(module)
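

# Usage sketch ('mypkg' is a hypothetical importable package): the returned
# provider exposes both resource and metadata access.
#
#     provider = get_provider('mypkg')
#     if provider.has_metadata('PKG-INFO'):
#         pkg_info = provider.get_metadata('PKG-INFO')
#     listing = provider.resource_listdir('data')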


def _macosx_vers(_cache=[]):
    if not _cache:
        version = platform.mac_ver()[0]
        # fallback for MacPorts
        if version == '':
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist):
                if hasattr(plistlib, 'readPlist'):
                    plist_content = plistlib.readPlist(plist)
                    if 'ProductVersion' in plist_content:
                        version = plist_content['ProductVersion']

        _cache.append(version.split('.'))
    return _cache[0]


def _macosx_arch(machine):
    return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)


def get_build_platform():
    """Return this platform's string for platform-specific distributions

    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    from sysconfig import get_platform

    plat = get_platform()
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (
                int(version[0]), int(version[1]),
                _macosx_arch(machine),
            )
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat


macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform


def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?

    Returns true if either platform is ``None``, or the platforms are equal.

    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided == required:
        # easy case
        return True

    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)

        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6. All packages built after this point will
            # use the new macosx designation.
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                if (dversion == 7 and macosversion >= "10.3") or \
                        (dversion == 8 and macosversion >= "10.4"):
                    return True
            # egg isn't macosx or legacy darwin
            return False

        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
                provMac.group(3) != reqMac.group(3):
            return False

        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False

        return True

    # XXX Linux and other platforms' special cases should go here
    return False
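

# Illustrative checks (platform strings are examples only):
#
#     compatible_platforms(None, 'linux-x86_64')                          # True
#     compatible_platforms('macosx-10.9-x86_64', 'macosx-10.15-x86_64')   # True
#     compatible_platforms('macosx-11.0-arm64', 'macosx-10.15-x86_64')    # False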


def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)


# backward compatibility
run_main = run_script


def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    if isinstance(dist, six.string_types):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if not isinstance(dist, Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist


def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    return get_distribution(dist).load_entry_point(group, name)


def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    return get_distribution(dist).get_entry_map(group)


def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    return get_distribution(dist).get_entry_info(group, name)
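

# Usage sketch (the distribution, group, and entry point names below are
# hypothetical):
#
#     ep = get_entry_info('mydist', 'console_scripts', 'mytool')
#     if ep is not None:
#         main = load_entry_point('mydist', 'console_scripts', 'mytool')
#         main()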


class IMetadataProvider:
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""

    def get_metadata(name):
        """The named metadata resource as a string"""

    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines

        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""

    def metadata_isdir(name):
        """Is the named metadata a directory?  (like ``os.path.isdir()``)"""

    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""

    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""


class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""

    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`

        `manager` must be an ``IResourceManager``"""

    def has_resource(resource_name):
        """Does the package contain the named resource?"""

    def resource_isdir(resource_name):
        """Is the named resource a directory?  (like ``os.path.isdir()``)"""

    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""


class WorkingSet:
    """A collection of active distributions on sys.path (or a similar list)"""

    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        self.entries = []
        self.entry_keys = {}
        self.by_key = {}
        self.callbacks = []

        if entries is None:
            entries = sys.path

        for entry in entries:
            self.add_entry(entry)

    @classmethod
    def _build_master(cls):
        """
        Prepare the master working set.
        """
        ws = cls()
        try:
            from __main__ import __requires__
        except ImportError:
            # The main program does not list any requirements
            return ws

        # ensure the requirements are met
        try:
            ws.require(__requires__)
        except VersionConflict:
            return cls._build_from_requirements(__requires__)

        return ws

    @classmethod
    def _build_from_requirements(cls, req_spec):
        """
        Build a working set from a requirement spec. Rewrites sys.path.
        """
        # try it without defaults already on sys.path
        # by starting with an empty path
        ws = cls([])
        reqs = parse_requirements(req_spec)
        dists = ws.resolve(reqs, Environment())
        for dist in dists:
            ws.add(dist)

        # add any missing entries from sys.path
        for entry in sys.path:
            if entry not in ws.entries:
                ws.add_entry(entry)

        # then copy back to sys.path
        sys.path[:] = ws.entries
        return ws

    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it

        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added.  `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)

    def __contains__(self, dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist

    def find(self, req):
        """Find a distribution matching requirement `req`

        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`.  But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # XXX add more info
            raise VersionConflict(dist, req)
        return dist

    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`

        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        return (
            entry
            for dist in self
            for entry in dist.get_entry_map(group).values()
            if name is None or name == entry.name
        )

    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)

    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set

        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue

            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key] = 1
                    yield self.by_key[key]

    def add(self, dist, entry=None, insert=True, replace=False):
        """Add `dist` to working set, associated with `entry`

        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).

        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry, replace=replace)

        if entry is None:
            entry = dist.location
        keys = self.entry_keys.setdefault(entry, [])
        keys2 = self.entry_keys.setdefault(dist.location, [])
        if not replace and dist.key in self.by_key:
            # ignore hidden distros
            return

        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        self._added_new(dist)

    def resolve(self, requirements, env=None, installer=None,
                replace_conflicting=False, extras=None):
        """List all distributions needed to (recursively) meet `requirements`

        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
        if supplied, should be an ``Environment`` instance.  If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set.  `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.

        Unless `replace_conflicting=True`, raises a VersionConflict exception
        if any requirements are found on the path that have the correct name
        but the wrong version.  Otherwise, if an `installer` is supplied it
        will be invoked to obtain the correct version of the requirement and
        activate it.

        `extras` is a list of the extras to be used with these requirements.
        This is important because extra requirements may look like `my_req;
        extra = "my_extra"`, which would otherwise be interpreted as a purely
        optional requirement.  Instead, we want to be able to assert that these
        requirements are truly required.
        """

        # set up the stack
        requirements = list(requirements)[::-1]
        # set of processed requirements
        processed = {}
        # key -> dist
        best = {}
        to_activate = []

        req_extras = _ReqExtras()

        # Mapping of requirement to set of distributions that required it;
        # useful for reporting info about conflicts.
        required_by = collections.defaultdict(set)

        while requirements:
            # process dependencies breadth-first
            req = requirements.pop(0)
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue

            if not req_extras.markers_pass(req, extras):
                continue

            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None or (dist not in req and replace_conflicting):
                    ws = self
                    if env is None:
                        if dist is None:
                            env = Environment(self.entries)
                        else:
                            # Use an empty environment and workingset to avoid
                            # any further conflicts with the conflicting
                            # distribution
                            env = Environment([])
                            ws = WorkingSet([])
                    dist = best[req.key] = env.best_match(
                        req, ws, installer,
                        replace_conflicting=replace_conflicting
                    )
                    if dist is None:
                        requirers = required_by.get(req, None)
                        raise DistributionNotFound(req, requirers)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                dependent_req = required_by[req]
                raise VersionConflict(dist, req).with_context(dependent_req)

            # push the new requirements onto the stack
            new_requirements = dist.requires(req.extras)[::-1]
            requirements.extend(new_requirements)

            # Register the new requirements needed by req
            for new_requirement in new_requirements:
                required_by[new_requirement].add(req.project_name)
                req_extras[new_requirement] = req.extras

            processed[req] = True

        # return list of distros to activate
        return to_activate

    def find_plugins(
            self, plugin_env, full_env=None, installer=None, fallback=True):
        """Find all activatable distributions in `plugin_env`

        Example usage::

            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            # add plugins+libs to sys.path
            map(working_set.add, distributions)
            # display errors
            print('Could not load', errors)

        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions.  If `full_env` is
        not supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.

        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.

        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies.  `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """

        plugin_projects = list(plugin_env)
        # scan project names in alphabetic order
        plugin_projects.sort()

        error_info = {}
        distributions = {}

        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env

        shadow_set = self.__class__([])
        # put all our entries in shadow_set
        list(map(shadow_set.add, self))

        for project_name in plugin_projects:

            for dist in plugin_env[project_name]:

                req = [dist.as_requirement()]

                try:
                    resolvees = shadow_set.resolve(req, env, installer)

                except ResolutionError as v:
                    # save error info
                    error_info[dist] = v
                    if fallback:
                        # try the next older version of project
                        continue
                    else:
                        # give up on this project, keep going
                        break

                else:
                    list(map(shadow_set.add, resolvees))
                    distributions.update(dict.fromkeys(resolvees))

                    # success, no need to try any more versions of this project
                    break

        distributions = list(distributions)
        distributions.sort()

        return distributions, error_info

    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated

        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required.  The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))

        for dist in needed:
            self.add(dist)

        return needed

    def subscribe(self, callback, existing=True):
        """Invoke `callback` for all distributions

        If `existing=True` (default), call on all existing ones, as well.
        """
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        if not existing:
            return
        for dist in self:
            callback(dist)

    def _added_new(self, dist):
        for callback in self.callbacks:
            callback(dist)

    def __getstate__(self):
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )

    def __setstate__(self, e_k_b_c):
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
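

# Usage sketch for WorkingSet (requirement and entry point group names are
# hypothetical; most callers use the module-level ``working_set`` instance
# created near the end of this file rather than building their own):
#
#     ws = WorkingSet()                      # snapshot of sys.path
#     activated = ws.require('mydist>=1.0')  # activate needed distributions
#     for ep in ws.iter_entry_points('myapp.plugins'):
#         plugin = ep.load()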


class _ReqExtras(dict):
    """
    Map each requirement to the extras that demanded it.
    """

    def markers_pass(self, req, extras=None):
        """
        Evaluate markers for req against each extra that
        demanded it.

        Return False if the req has a marker and fails
        evaluation. Otherwise, return True.
        """
        extra_evals = (
            req.marker.evaluate({'extra': extra})
            for extra in self.get(req, ()) + (extras or (None,))
        )
        return not req.marker or any(extra_evals)


class Environment:
    """Searchable snapshot of distributions on a search path"""

    def __init__(
            self, search_path=None, platform=get_supported_platform(),
            python=PY_MAJOR):
        """Snapshot distributions available on a search path

        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.

        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with.  If
        unspecified, it defaults to the current platform.  `python` is an
        optional string naming the desired version of Python (e.g. ``'3.6'``);
        it defaults to the current version.

        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        self._distmap = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)

    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?

        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        py_compat = (
            self.python is None
            or dist.py_version is None
            or dist.py_version == self.python
        )
        return py_compat and compatible_platforms(dist.platform, self.platform)

    def remove(self, dist):
        """Remove `dist` from the environment"""
        self._distmap[dist.key].remove(dist)

    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment

        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.  Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path

        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)

    def __getitem__(self, project_name):
        """Return a newest-to-oldest list of distributions for `project_name`

        Uses case-insensitive `project_name` comparison, assuming all the
        project's distributions use their project's name converted to all
        lowercase as their key.

        """
        distribution_key = project_name.lower()
        return self._distmap.get(distribution_key, [])

    def add(self, dist):
        """Add `dist` if we ``can_add()`` it and it has not already been added
        """
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key, [])
            if dist not in dists:
                dists.append(dist)
                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)

    def best_match(
            self, req, working_set, installer=None, replace_conflicting=False):
        """Find distribution best matching `req` and usable on `working_set`

        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active.  (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.)  If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`.  If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        try:
            dist = working_set.find(req)
        except VersionConflict:
            if not replace_conflicting:
                raise
            dist = None
        if dist is not None:
            return dist
        for dist in self[req.key]:
            if dist in req:
                return dist
        # try to download/install
        return self.obtain(req, installer)

    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)

        Obtain a distro that matches requirement (e.g. via download).  In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead.  This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)

    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]:
                yield key

    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other, Distribution):
            self.add(other)
        elif isinstance(other, Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self

    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
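

# Usage sketch for Environment (the search directories are hypothetical):
#
#     env = Environment(['plugins', 'more-plugins'])
#     for project_name in env:              # unique project keys
#         newest = env[project_name][0]     # lists are newest-to-oldest
#     dist = env.best_match(Requirement.parse('mydist'), working_set)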


# XXX backward compatibility
AvailableDistributions = Environment


class ExtractionError(RuntimeError):
    """An error occurred extracting a resource

    The following attributes are available from instances of this exception:

    manager
        The resource manager that raised this exception

    cache_path
        The base directory for resource extraction

    original_error
        The exception instance that caused extraction to fail
    """


class ResourceManager:
    """Manage resource extraction and packages"""
    extraction_path = None

    def __init__(self):
        self.cached_files = {}

    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)

    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )

    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )

    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )

    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )

    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )

    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""

        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()

        tmpl = textwrap.dedent("""
            Can't extract file(s) to egg cache

            The following error occurred while trying to extract file(s)
            to the Python egg cache:

              {old_exc}

            The Python egg cache directory is currently set to:

              {cache_path}

            Perhaps your account does not have write access to this directory?
            You can change the cache directory by setting the PYTHON_EGG_CACHE
            environment variable to point to an accessible directory.
            """).lstrip()
        err = ExtractionError(tmpl.format(**locals()))
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err

    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`

        The parent directory of the resulting path will be created if it does
        not already exist.  `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension.  `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.

        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except Exception:
            self.extraction_error()

        self._warn_unsafe_extraction_path(extract_path)

        self.cached_files[target_path] = 1
        return target_path

    @staticmethod
    def _warn_unsafe_extraction_path(path):
        """
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.

        See Distribute #375 for more details.
        """
        if os.name == 'nt' and not path.startswith(os.environ['windir']):
            # On Windows, permissions are generally restrictive by default
            #  and temp directories are not writable by other users, so
            #  bypass the warning.
            return
        mode = os.stat(path).st_mode
        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
            msg = (
                "%s is writable by group/others and vulnerable to attack "
                "when "
                "used with get_resource_filename. Consider a more secure "
                "location (set with .set_extraction_path or the "
                "PYTHON_EGG_CACHE environment variable)." % path
            )
            warnings.warn(msg, UserWarning)

    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`

        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.

        Resource providers should call this method ONLY after successfully
        extracting a compressed resource.  They must NOT call it on resources
        that are already in the filesystem.

        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """

        if os.name == 'posix':
            # Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
            os.chmod(tempname, mode)

    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.

        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``.  (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks.  See that routine's documentation for more
        details.)

        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``.  You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done.  There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.

        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )

        self.extraction_path = path

    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process.  This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX


def get_default_cache():
    """
    Return the ``PYTHON_EGG_CACHE`` environment variable
    or a platform-relevant user cache dir for an app
    named "Python-Eggs".
    """
    return (
        os.environ.get('PYTHON_EGG_CACHE')
        or appdirs.user_cache_dir(appname='Python-Eggs')
    )
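

# The cache location can therefore be overridden per process, e.g. (sketch;
# the path below is hypothetical):
#
#     os.environ['PYTHON_EGG_CACHE'] = '/var/cache/my-eggs'
#     get_default_cache()                   # -> '/var/cache/my-eggs'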


def safe_name(name):
    """Convert an arbitrary string to a standard distribution name

    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    return re.sub('[^A-Za-z0-9.]+', '-', name)


def safe_version(version):
    """
    Convert an arbitrary string to a standard version string
    """
    try:
        # normalize the version
        return str(packaging.version.Version(version))
    except packaging.version.InvalidVersion:
        version = version.replace(' ', '.')
        return re.sub('[^A-Za-z0-9.]+', '-', version)


def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name

    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    """
    return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()


def to_filename(name):
    """Convert a project or version name to its filename-escaped form

    Any '-' characters are currently replaced with '_'.
    """
    return name.replace('-', '_')
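

# Illustrative transformations (inputs are examples only):
#
#     safe_name('my.weird pkg')     # -> 'my.weird-pkg'
#     safe_version('2.1-rc1')       # -> '2.1rc1' (PEP 440 normalization)
#     to_filename('my-dist')        # -> 'my_dist'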


def invalid_marker(text):
    """
    Validate text as a PEP 508 environment marker; return an exception
    instance if invalid, or False otherwise.
    """
    try:
        evaluate_marker(text)
    except SyntaxError as e:
        e.filename = None
        e.lineno = None
        return e
    return False


def evaluate_marker(text, extra=None):
    """
    Evaluate a PEP 508 environment marker.
    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.

    This implementation uses the 'pyparsing' module.
    """
    try:
        marker = packaging.markers.Marker(text)
        return marker.evaluate()
    except packaging.markers.InvalidMarker as e:
        raise SyntaxError(e)
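

# Example marker evaluations (sketch; results depend on the interpreter and
# platform actually running this code):
#
#     evaluate_marker('python_version >= "3.4"')     # True on Python 3.4+
#     evaluate_marker('sys_platform == "win32"')     # False except on Windows
#     invalid_marker('this is not a marker')         # returns a SyntaxError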


class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""

    egg_name = None
    egg_info = None
    loader = None

    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))

    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)

    def get_resource_stream(self, manager, resource_name):
        return io.BytesIO(self.get_resource_string(manager, resource_name))

    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))

    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))

    def _get_metadata_path(self, name):
        return self._fn(self.egg_info, name)

    def has_metadata(self, name):
        if not self.egg_info:
            return self.egg_info

        path = self._get_metadata_path(name)
        return self._has(path)

    def get_metadata(self, name):
        if not self.egg_info:
            return ""
        path = self._get_metadata_path(name)
        value = self._get(path)
        if six.PY2:
            return value
        try:
            return value.decode('utf-8')
        except UnicodeDecodeError as exc:
            # Include the path in the error message to simplify
            # troubleshooting, and without changing the exception type.
            exc.reason += ' in {} file at path: {}'.format(name, path)
            raise

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))

    def resource_isdir(self, resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))

    def metadata_isdir(self, name):
        return self.egg_info and self._isdir(self._fn(self.egg_info, name))

    def resource_listdir(self, resource_name):
        return self._listdir(self._fn(self.module_path, resource_name))

    def metadata_listdir(self, name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info, name))
        return []

    def run_script(self, script_name, namespace):
        script = 'scripts/' + script_name
        if not self.has_metadata(script):
            raise ResolutionError(
                "Script {script!r} not found in metadata at {self.egg_info!r}"
                .format(**locals()),
            )
        script_text = self.get_metadata(script).replace('\r\n', '\n')
        script_text = script_text.replace('\r', '\n')
        script_filename = self._fn(self.egg_info, script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            source = open(script_filename).read()
            code = compile(source, script_filename, 'exec')
            exec(code, namespace, namespace)
        else:
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text, script_filename, 'exec')
            exec(script_code, namespace, namespace)

    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _fn(self, base, resource_name):
        self._validate_resource_path(resource_name)
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base

    @staticmethod
    def _validate_resource_path(path):
        """
        Validate the resource paths according to the docs.
        https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access

        >>> warned = getfixture('recwarn')
        >>> warnings.simplefilter('always')
        >>> vrp = NullProvider._validate_resource_path
        >>> vrp('foo/bar.txt')
        >>> bool(warned)
        False
        >>> vrp('../foo/bar.txt')
        >>> bool(warned)
        True
        >>> warned.clear()
        >>> vrp('/foo/bar.txt')
        >>> bool(warned)
        True
        >>> vrp('foo/../../bar.txt')
        >>> bool(warned)
        True
        >>> warned.clear()
        >>> vrp('foo/f../bar.txt')
        >>> bool(warned)
        False

        Windows path separators are straight-up disallowed.
        >>> vrp(r'\\foo/bar.txt')
        Traceback (most recent call last):
        ...
        ValueError: Use of .. or absolute path in a resource path \
is not allowed.

        >>> vrp(r'C:\\foo/bar.txt')
        Traceback (most recent call last):
        ...
        ValueError: Use of .. or absolute path in a resource path \
is not allowed.

        Blank values are allowed

        >>> vrp('')
        >>> bool(warned)
        False

        Non-string values are not.

        >>> vrp(None)
        Traceback (most recent call last):
        ...
        AttributeError: ...
        """
        invalid = (
            os.path.pardir in path.split(posixpath.sep) or
            posixpath.isabs(path) or
            ntpath.isabs(path)
        )
        if not invalid:
            return

        msg = "Use of .. or absolute path in a resource path is not allowed."

        # Aggressively disallow Windows absolute paths
        if ntpath.isabs(path) and not posixpath.isabs(path):
            raise ValueError(msg)

        # for compatibility, warn; in future
        # raise ValueError(msg)
        warnings.warn(
            msg[:-1] + " and will raise exceptions in a future release.",
            DeprecationWarning,
            stacklevel=4,
        )

    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )


register_loader_type(object, NullProvider)


class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        path = self.module_path
        old = None
        while path != old:
            if _is_egg_path(path):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            old = path
            path, base = os.path.split(path)


class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""

    def _has(self, path):
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)

    def get_resource_stream(self, manager, resource_name):
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        with open(path, 'rb') as stream:
            return stream.read()

    @classmethod
    def _register(cls):
        loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
        for name in loader_names:
            loader_cls = getattr(importlib_machinery, name, type(None))
            register_loader_type(loader_cls, cls)


DefaultProvider._register()


class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""

    module_path = None

    _isdir = _has = lambda self, path: False

    def _get(self, path):
        return ''

    def _listdir(self, path):
        return []

    def __init__(self):
        pass


empty_provider = EmptyProvider()


class ZipManifests(dict):
    """
    zip manifest builder
    """

    @classmethod
    def build(cls, path):
        """
        Build a dictionary similar to the zipimport directory
        caches, except instead of tuples, store ZipInfo objects.

        Use a platform-specific path separator (os.sep) for the path keys
        for compatibility with pypy on Windows.
        """
        with zipfile.ZipFile(path) as zfile:
            items = (
                (
                    name.replace('/', os.sep),
                    zfile.getinfo(name),
                )
                for name in zfile.namelist()
            )
            return dict(items)

    load = build


class MemoizedZipManifests(ZipManifests):
    """
    Memoized zipfile manifests.
    """
    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')

    def load(self, path):
        """
        Load a manifest at path or return a suitable manifest already loaded.
        """
        path = os.path.normpath(path)
        mtime = os.stat(path).st_mtime

        if path not in self or self[path].mtime != mtime:
            manifest = self.build(path)
            self[path] = self.manifest_mod(manifest, mtime)

        return self[path].manifest


class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""

    eagers = None
    _zip_manifests = MemoizedZipManifests()

    def __init__(self, module):
        EggProvider.__init__(self, module)
        self.zip_pre = self.loader.archive + os.sep

    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        fspath = fspath.rstrip(os.sep)
        if fspath == self.loader.archive:
            return ''
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.zip_pre)
        )

    def _parts(self, zip_path):
        # Convert a zipfile subpath into an egg-relative path part list.
        # pseudo-fs path
        fspath = self.zip_pre + zip_path
        if fspath.startswith(self.egg_root + os.sep):
            return fspath[len(self.egg_root) + 1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.egg_root)
        )

    @property
    def zipinfo(self):
        return self._zip_manifests.load(self.loader.archive)

    def get_resource_filename(self, manager, resource_name):
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)

    @staticmethod
    def _get_date_and_size(zip_stat):
        size = zip_stat.file_size
        # ymdhms+wday, yday, dst
        date_time = zip_stat.date_time + (0, 0, -1)
        # 1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size

    def _extract_resource(self, manager, zip_path):

        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            # return the extracted directory name
            return os.path.dirname(last)

        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])

        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                          'on this platform')
        try:

            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )

            if self._is_current(real_path, zip_path):
                return real_path

            outf, tmpnam = _mkstemp(
                ".$extract",
                dir=os.path.dirname(real_path),
            )
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            utime(tmpnam, (timestamp, timestamp))
            manager.postprocess(tmpnam, real_path)

            try:
                rename(tmpnam, real_path)

            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        #  so proceed.
                        return real_path
                    # on Windows, delete the old file and retry
                    elif os.name == 'nt':
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise

        except os.error:
            # report a user-friendly error
            manager.extraction_error()

        return real_path

    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        if stat.st_size != size or stat.st_mtime != timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        with open(file_path, 'rb') as f:
            file_contents = f.read()
        return zip_contents == file_contents

    def _get_eager_resources(self):
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers

    def _index(self):
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind

    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()

    def _isdir(self, fspath):
        return self._zipinfo_name(fspath) in self._index()

    def _listdir(self, fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))

    def _eager_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.egg_root, resource_name))

    def _resource_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.module_path, resource_name))


register_loader_type(zipimport.zipimporter, ZipProvider)


class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """

    def __init__(self, path):
        self.path = path

    def _get_metadata_path(self, name):
        return self.path

    def has_metadata(self, name):
        return name == 'PKG-INFO' and os.path.isfile(self.path)

    def get_metadata(self, name):
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")

        with io.open(self.path, encoding='utf-8', errors="replace") as f:
            metadata = f.read()
        self._warn_on_replacement(metadata)
        return metadata

    def _warn_on_replacement(self, metadata):
        # Python 2.7 compat for: replacement_char = '�'
        replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
        if replacement_char in metadata:
            tmpl = "{self.path} could not be properly decoded in UTF-8"
            msg = tmpl.format(**locals())
            warnings.warn(msg)

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))


class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir, project_name=dist_name, metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """

    def __init__(self, path, egg_info):
        self.module_path = path
        self.egg_info = egg_info


class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""

        self.zip_pre = importer.archive + os.sep
        self.loader = importer
        if importer.prefix:
            self.module_path = os.path.join(importer.archive, importer.prefix)
        else:
            self.module_path = importer.archive
        self._setup_prefix()
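
# Usage sketch for EggMetadata (the archive path below is illustrative):
#
#     importer = zipimport.zipimporter('/path/to/Package-1.0-py3.8.egg')
#     metadata = EggMetadata(importer)
#     dist = Distribution.from_filename(importer.archive, metadata=metadata)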


_declare_state('dict', _distribution_finders={})


def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item.  See ``pkg_resources.find_on_path`` for an example."""
    _distribution_finders[importer_type] = distribution_finder


def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    importer = get_importer(path_item)
    finder = _find_adapter(_distribution_finders, importer)
    return finder(importer, path_item, only)


def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    for subitem in metadata.resource_listdir(''):
        if _is_egg_path(subitem):
            subpath = os.path.join(path_item, subitem)
            dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
            for dist in dists:
                yield dist
        elif subitem.lower().endswith('.dist-info'):
            subpath = os.path.join(path_item, subitem)
            submeta = EggMetadata(zipimport.zipimporter(subpath))
            submeta.egg_info = subpath
            yield Distribution.from_location(path_item, subitem, submeta)


register_finder(zipimport.zipimporter, find_eggs_in_zip)


def find_nothing(importer, path_item, only=False):
    return ()


register_finder(object, find_nothing)


def _by_version_descending(names):
    """
    Given a list of filenames, return them in descending order
    by version number.

    >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
    >>> _by_version_descending(names)
    ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
    >>> _by_version_descending(names)
    ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
    >>> _by_version_descending(names)
    ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
    """
    def _by_version(name):
        """
        Parse each component of the filename
        """
        name, ext = os.path.splitext(name)
        parts = itertools.chain(name.split('-'), [ext])
        return [packaging.version.parse(part) for part in parts]

    return sorted(names, key=_by_version, reverse=True)


def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)

    if _is_unpacked_egg(path_item):
        yield Distribution.from_filename(
            path_item, metadata=PathMetadata(
                path_item, os.path.join(path_item, 'EGG-INFO')
            )
        )
        return

    entries = safe_listdir(path_item)

    # for performance, before sorting by version,
    # screen entries for only those that will yield
    # distributions
    filtered = (
        entry
        for entry in entries
        if dist_factory(path_item, entry, only)
    )

    # scan for .egg and .egg-info in directory
    path_item_entries = _by_version_descending(filtered)
    for entry in path_item_entries:
        fullpath = os.path.join(path_item, entry)
        factory = dist_factory(path_item, entry, only)
        for dist in factory(fullpath):
            yield dist


def dist_factory(path_item, entry, only):
    """
    Return a dist_factory for a path_item and entry
    """
    lower = entry.lower()
    is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info')))
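    # Dispatch on the entry's suffix:
    #   *.egg-info / *.dist-info      -> distributions_from_metadata
    #   *.egg        (unless only)    -> find_distributions
    #   *.egg-link   (unless only)    -> resolve_egg_link
    #   anything else                 -> NoDists() (yields nothing)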
    return (
        distributions_from_metadata
        if is_meta else
        find_distributions
        if not only and _is_egg_path(entry) else
        resolve_egg_link
        if not only and lower.endswith('.egg-link') else
        NoDists()
    )


class NoDists:
    """
    >>> bool(NoDists())
    False

    >>> list(NoDists()('anything'))
    []
    """
    def __bool__(self):
        return False
    if six.PY2:
        __nonzero__ = __bool__

    def __call__(self, fullpath):
        return iter(())


def safe_listdir(path):
    """
    Attempt to list contents of path, but suppress some exceptions.
    """
    try:
        return os.listdir(path)
    except (PermissionError, NotADirectoryError):
        pass
    except OSError as e:
        # Ignore the directory if it does not exist, is not a directory,
        # or permission is denied
        ignorable = (
            e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
            # Python 2 on Windows needs to be handled this way :(
            or getattr(e, "winerror", None) == 267
        )
        if not ignorable:
            raise
    return ()


def distributions_from_metadata(path):
    root = os.path.dirname(path)
    if os.path.isdir(path):
        if len(os.listdir(path)) == 0:
            # empty metadata dir; skip
            return
        metadata = PathMetadata(root, path)
    else:
        metadata = FileMetadata(path)
    entry = os.path.basename(path)
    yield Distribution.from_location(
        root, entry, metadata, precedence=DEVELOP_DIST,
    )


def non_empty_lines(path):
    """
    Yield non-empty lines from file at path
    """
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                yield line


def resolve_egg_link(path):
    """
    Given a path to an .egg-link, resolve distributions
    present in the referenced path.
    """
    referenced_paths = non_empty_lines(path)
    resolved_paths = (
        os.path.join(os.path.dirname(path), ref)
        for ref in referenced_paths
    )
    dist_groups = map(find_distributions, resolved_paths)
    return next(dist_groups, ())


register_finder(pkgutil.ImpImporter, find_on_path)

if hasattr(importlib_machinery, 'FileFinder'):
    register_finder(importlib_machinery.FileFinder, find_on_path)

_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})


def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::

        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages

    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath.  For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    _namespace_handlers[importer_type] = namespace_handler


def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""

    importer = get_importer(path_item)
    if importer is None:
        return None

    # capture warnings due to #1111
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        loader = importer.find_module(packageName)

    if loader is None:
        return None
    module = sys.modules.get(packageName)
    if module is None:
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module, '__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        _rebuild_mod_path(path, packageName, module)
    return subpath


def _rebuild_mod_path(orig_path, package_name, module):
    """
    Rebuild module.__path__ ensuring that all entries are ordered
    corresponding to their sys.path order
    """
    sys_path = [_normalize_cached(p) for p in sys.path]

    def safe_sys_path_index(entry):
        """
        Workaround for #520 and #513.
        """
        try:
            return sys_path.index(entry)
        except ValueError:
            return float('inf')

    def position_in_sys_path(path):
        """
        Return the ordinal of the path based on its position in sys.path
        """
        path_parts = path.split(os.sep)
        module_parts = package_name.count('.') + 1
        parts = path_parts[:-module_parts]
        return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))

    new_path = sorted(orig_path, key=position_in_sys_path)
    new_path = [_normalize_cached(p) for p in new_path]

    if isinstance(module.__path__, list):
        module.__path__[:] = new_path
    else:
        module.__path__ = new_path


def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""

    _imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return

        path = sys.path
        parent, _, _ = packageName.rpartition('.')

        if parent:
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)

        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent or None, []).append(packageName)
        _namespace_packages.setdefault(packageName, [])

        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)

    finally:
        _imp.release_lock()


def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    _imp.acquire_lock()
    try:
        for package in _namespace_packages.get(parent, ()):
            subpath = _handle_ns(package, path_item)
            if subpath:
                fixup_namespace_packages(subpath, package)
    finally:
        _imp.release_lock()


def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""

    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    for item in module.__path__:
        if _normalize_cached(item) == normalized:
            break
    else:
        # Only return the path if it's not already there
        return subpath


register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)

if hasattr(importlib_machinery, 'FileFinder'):
    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)


def null_ns_handler(importer, path_item, packageName, module):
    return None


register_namespace_handler(object, null_ns_handler)


def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))


def _cygwin_patch(filename):  # pragma: nocover
    """
    Contrary to POSIX 2008, on Cygwin, getcwd(3) contains
    symlink components. Using os.path.abspath() works around
    this limitation. A fix in os.getcwd(), or in Cygwin itself,
    would probably be better, except that this behavior seems
    to be by design...
    """
    return os.path.abspath(filename) if sys.platform == 'cygwin' else filename


def _normalize_cached(filename, _cache={}):
    try:
        return _cache[filename]
    except KeyError:
        _cache[filename] = result = normalize_path(filename)
        return result


def _is_egg_path(path):
    """
    Determine if given path appears to be an egg.
    """
    return path.lower().endswith('.egg')


def _is_unpacked_egg(path):
    """
    Determine if given path appears to be an unpacked egg.
    """
    return (
        _is_egg_path(path) and
        os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
    )


def _set_parent_ns(packageName):
    parts = packageName.split('.')
    name = parts.pop()
    if parts:
        parent = '.'.join(parts)
        setattr(sys.modules[parent], name, sys.modules[packageName])


def yield_lines(strs):
    """Yield non-empty/non-comment lines of a string or sequence"""
    if isinstance(strs, six.string_types):
        for s in strs.splitlines():
            s = s.strip()
            # skip blank lines/comments
            if s and not s.startswith('#'):
                yield s
    else:
        for ss in strs:
            for s in yield_lines(ss):
                yield s
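
# For example, given a string with comments and blank lines:
#     list(yield_lines("one\n# comment\n\ntwo"))  ->  ['one', 'two']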


MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
    r"""
    (?P<name>[^-]+) (
        -(?P<ver>[^-]+) (
            -py(?P<pyver>[^-]+) (
                -(?P<plat>.+)
            )?
        )?
    )?
    """,
    re.VERBOSE | re.IGNORECASE,
).match
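
# For example, an illustrative egg basename parses as:
#   EGG_NAME('FooPkg-1.2-py3.8-win32').group('name', 'ver', 'pyver', 'plat')
#   -> ('FooPkg', '1.2', '3.8', 'win32')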


class EntryPoint:
    """Object representing an advertised importable object"""

    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        self.extras = tuple(extras)
        self.dist = dist

    def __str__(self):
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s

    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)

    def load(self, require=True, *args, **kwargs):
        """
        Require packages for this EntryPoint, then resolve it.
        """
        if not require or args or kwargs:
            warnings.warn(
                "Parameters to load are deprecated.  Call .resolve and "
                ".require separately.",
                PkgResourcesDeprecationWarning,
                stacklevel=2,
            )
        if require:
            self.require(*args, **kwargs)
        return self.resolve()

    def resolve(self):
        """
        Resolve the entry point from its module and attrs.
        """
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc))

    def require(self, env=None, installer=None):
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)

        # Get the requirements for this entry point with all its extras and
        # then resolve them. We have to pass `extras` along when resolving so
        # that the working set knows what extras we want. Otherwise, for
        # dist-info distributions, the working set will assume that the
        # requirements for that extra are purely optional and skip over them.
        reqs = self.dist.requires(self.extras)
        items = working_set.resolve(reqs, env, installer, extras=self.extras)
        list(map(working_set.add, items))

    pattern = re.compile(
        r'\s*'
        r'(?P<name>.+?)\s*'
        r'=\s*'
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+))?\s*'
        r'(?P<extras>\[.*\])?\s*$'
    )

    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`

        Entry point syntax follows the form::

            name = some.module:some.attr [extra1, extra2]

        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
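
        For example (module and attribute names below are illustrative)::

            ep = EntryPoint.parse("console = mypkg.cli:main [extra1]")
            ep.name, ep.module_name, ep.attrs, ep.extras
            # -> ('console', 'mypkg.cli', ('main',), ('extra1',))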
        """
        m = cls.pattern.match(src)
        if not m:
            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
            raise ValueError(msg, src)
        res = m.groupdict()
        extras = cls._parse_extras(res['extras'])
        attrs = res['attr'].split('.') if res['attr'] else ()
        return cls(res['name'], res['module'], attrs, extras, dist)

    @classmethod
    def _parse_extras(cls, extras_spec):
        if not extras_spec:
            return ()
        req = Requirement.parse('x' + extras_spec)
        if req.specs:
            raise ValueError()
        return req.extras

    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name] = ep
        return this

    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        if isinstance(data, dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps


def _remove_md5_fragment(location):
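    # Strip a trailing '#md5=...' fragment from a location URL, e.g. (an
    # illustrative URL) 'http://host/pkg.egg#md5=abc' -> 'http://host/pkg.egg'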
    if not location:
        return ''
    parsed = urllib.parse.urlparse(location)
    if parsed[-1].startswith('md5='):
        return urllib.parse.urlunparse(parsed[:-1] + ('',))
    return location


def _version_from_file(lines):
    """
    Given an iterable of lines from a Metadata file, return
    the value of the Version field, if present, or None otherwise.
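
    For example (an illustrative pair of metadata lines)::

        _version_from_file(['Metadata-Version: 1.0', 'Version: 1.2.3'])
        # -> '1.2.3'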
    """
    def is_version_line(line):
        return line.lower().startswith('version:')
    version_lines = filter(is_version_line, lines)
    line = next(iter(version_lines), '')
    _, _, value = line.partition(':')
    return safe_version(value.strip()) or None


class Distribution:
    """Wrap an actual or potential sys.path entry w/metadata"""
    PKG_INFO = 'PKG-INFO'

    def __init__(
            self, location=None, metadata=None, project_name=None,
            version=None, py_version=PY_MAJOR, platform=None,
            precedence=EGG_DIST):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        self._provider = metadata or empty_provider

    @classmethod
    def from_location(cls, location, basename, metadata=None, **kw):
        project_name, version, py_version, platform = [None] * 4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            cls = _distributionImpl[ext.lower()]

            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name', 'ver', 'pyver', 'plat'
                )
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )._reload_version()

    def _reload_version(self):
        return self

    @property
    def hashcmp(self):
        return (
            self.parsed_version,
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version or '',
            self.platform or '',
        )

    def __hash__(self):
        return hash(self.hashcmp)

    def __lt__(self, other):
        return self.hashcmp < other.hashcmp

    def __le__(self, other):
        return self.hashcmp <= other.hashcmp

    def __gt__(self, other):
        return self.hashcmp > other.hashcmp

    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp

    def __ne__(self, other):
        return not self == other

    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed.  (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)

    @property
    def key(self):
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key

    @property
    def parsed_version(self):
        if not hasattr(self, "_parsed_version"):
            self._parsed_version = parse_version(self.version)

        return self._parsed_version

    def _warn_legacy_version(self):
        LV = packaging.version.LegacyVersion
        is_legacy = isinstance(self._parsed_version, LV)
        if not is_legacy:
            return

        # While an empty version is technically a legacy version and
        # not a valid PEP 440 version, it is unlikely to actually come
        # from a user; more likely it comes from setuptools attempting
        # to parse a filename and including it in the list. So we gate
        # this warning on whether the version is non-empty.
        if not self.version:
            return

        tmpl = textwrap.dedent("""
            '{project_name} ({version})' is being parsed as a legacy,
            non-PEP 440 version. You may find odd behavior and sort
            order. In particular it will be sorted as less than 0.0.
            It is recommended to migrate to PEP 440 compatible
            versions.
            """).strip().replace('\n', ' ')

        warnings.warn(tmpl.format(**vars(self)), PEP440Warning)

    @property
    def version(self):
        try:
            return self._version
        except AttributeError:
            version = self._get_version()
            if version is None:
                path = self._get_metadata_path_for_display(self.PKG_INFO)
                msg = (
                    "Missing 'Version:' header and/or {} file at path: {}"
                ).format(self.PKG_INFO, path)
                raise ValueError(msg, self)

            return version

    @property
    def _dep_map(self):
        """
        A map of extra to its list of (direct) requirements
        for this distribution, including the null extra.
        """
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._filter_extras(self._build_dep_map())
        return self.__dep_map

    @staticmethod
    def _filter_extras(dm):
        """
        Given a mapping of extras to dependencies, strip off
        environment markers and filter out any dependencies
        not matching the markers.
        """
        for extra in list(filter(None, dm)):
            new_extra = extra
            reqs = dm.pop(extra)
            new_extra, _, marker = extra.partition(':')
            fails_marker = marker and (
                invalid_marker(marker)
                or not evaluate_marker(marker)
            )
            if fails_marker:
                reqs = []
            new_extra = safe_extra(new_extra) or None

            dm.setdefault(new_extra, []).extend(reqs)
        return dm

    def _build_dep_map(self):
        dm = {}
        for name in 'requires.txt', 'depends.txt':
            for extra, reqs in split_sections(self._get_metadata(name)):
                dm.setdefault(extra, []).extend(parse_requirements(reqs))
        return dm

    def requires(self, extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None, ()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps

    def _get_metadata_path_for_display(self, name):
        """
        Return the path to the given metadata file, if available.
        """
        try:
            # We need to access _get_metadata_path() on the provider object
            # directly rather than through this class's __getattr__()
            # since _get_metadata_path() is marked private.
            path = self._provider._get_metadata_path(name)

        # Handle exceptions e.g. in case the distribution's metadata
        # provider doesn't support _get_metadata_path().
        except Exception:
            return '[could not detect]'

        return path

    def _get_metadata(self, name):
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line

    def _get_version(self):
        lines = self._get_metadata(self.PKG_INFO)
        version = _version_from_file(lines)

        return version

    def activate(self, path=None, replace=False):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None:
            path = sys.path
        self.insert_on(path, replace=replace)
        if path is sys.path:
            fixup_namespace_packages(self.location)
            for pkg in self._get_metadata('namespace_packages.txt'):
                if pkg in sys.modules:
                    declare_namespace(pkg)

    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )

        if self.platform:
            filename += '-' + self.platform
        return filename

    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self, self.location)
        else:
            return str(self)

    def __str__(self):
        try:
            version = getattr(self, 'version', None)
        except ValueError:
            version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name, version)

    def __getattr__(self, attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError(attr)
        return getattr(self._provider, attr)

    def __dir__(self):
        return list(
            set(super(Distribution, self).__dir__())
            | set(
                attr for attr in self._provider.__dir__()
                if not attr.startswith('_')
            )
        )

    if not hasattr(object, '__dir__'):
        # object.__dir__ is not available on Python 2.7, so drop this override
        del __dir__

    @classmethod
    def from_filename(cls, filename, metadata=None, **kw):
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )

    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        if isinstance(self.parsed_version, packaging.version.Version):
            spec = "%s==%s" % (self.project_name, self.parsed_version)
        else:
            spec = "%s===%s" % (self.project_name, self.parsed_version)

        return Requirement.parse(spec)

    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group, name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group, name),))
        return ep.load()

    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group, {})
        return ep_map

    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)

    def insert_on(self, path, loc=None, replace=False):
        """Ensure self.location is on path

        If replace=False (default):
            - If location is already in path anywhere, do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent.
              - Else: add to the end of path.
        If replace=True:
            - If location is already on path anywhere (not eggs)
              or higher priority than its parent (eggs)
              do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent,
                removing any lower-priority entries.
              - Else: add it to the front of path.
        """

        loc = loc or self.location
        if not loc:
            return

        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        npath = [(p and _normalize_cached(p) or p) for p in path]

        for p, item in enumerate(npath):
            if item == nloc:
                if replace:
                    break
                else:
                    # don't modify path (even removing duplicates) if
                    # found and not replace
                    return
            elif item == bdir and self.precedence == EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                # UNLESS it's already been added to sys.path and replace=False
                if (not replace) and nloc in npath[p:]:
                    return
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            if path is sys.path:
                self.check_version_conflict()
            if replace:
                path.insert(0, loc)
            else:
                path.append(loc)
            return

        # p is the spot where we found or inserted loc; now remove duplicates
        while True:
            try:
                np = npath.index(nloc, p + 1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                # ha!
                p = np

        return

    def check_version_conflict(self):
        if self.key == 'setuptools':
            # ignore the inevitable setuptools self-conflicts  :(
            return

        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                    or modname in _namespace_packages):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )

    def has_version(self):
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for " + repr(self))
            return False
        return True

    def clone(self, **kw):
        """Copy this distribution, substituting in any changed keyword args"""
        names = 'project_name version py_version platform location precedence'
        for attr in names.split():
            kw.setdefault(attr, getattr(self, attr, None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)

    @property
    def extras(self):
        return [dep for dep in self._dep_map if dep]


class EggInfoDistribution(Distribution):
    def _reload_version(self):
        """
        Packages installed by distutils (e.g. numpy or scipy)
        use an old safe_version, so their version numbers
        can get mangled when converted to filenames
        (e.g., 1.11.0.dev0+2329eae becomes 1.11.0.dev0_2329eae).
        Such distributions will not be parsed properly
        downstream by Distribution and safe_version, so
        take an extra step and try to get the version number from
        the metadata file itself instead of the filename.
        """
        md_version = self._get_version()
        if md_version:
            self._version = md_version
        return self


class DistInfoDistribution(Distribution):
    """
    Wrap an actual or potential sys.path entry
    w/metadata, .dist-info style.
    """
    PKG_INFO = 'METADATA'
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")

    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            metadata = self.get_metadata(self.PKG_INFO)
            self._pkg_info = email.parser.Parser().parsestr(metadata)
            return self._pkg_info

    @property
    def _dep_map(self):
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map

    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        dm = self.__dep_map = {None: []}

        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            reqs.extend(parse_requirements(req))

        def reqs_for_extra(extra):
            for req in reqs:
                if not req.marker or req.marker.evaluate({'extra': extra}):
                    yield req

        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)

        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            s_extra = safe_extra(extra.strip())
            dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)

        return dm


_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': EggInfoDistribution,
    '.dist-info': DistInfoDistribution,
}


def issue_warning(*args, **kw):
    level = 1
    g = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        pass
    warnings.warn(stacklevel=level + 1, *args, **kw)


class RequirementParseError(ValueError):
    def __str__(self):
        return ' '.join(self.args)


def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`

    `strs` must be a string, or a (possibly-nested) iterable thereof.
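
    For example (the project names below are illustrative)::

        reqs = list(parse_requirements(['foo>=1.0', 'bar[extra]<2.0']))
        # reqs[0].project_name -> 'foo'; reqs[1].extras -> ('extra',)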
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))

    for line in lines:
        # Drop comments -- a hash without a space may be in a URL.
        if ' #' in line:
            line = line[:line.find(' #')]
        # If there is a line continuation, drop it, and append the next line.
        if line.endswith('\\'):
            line = line[:-2].strip()
            try:
                line += next(lines)
            except StopIteration:
                return
        yield Requirement(line)


class Requirement(packaging.requirements.Requirement):
    def __init__(self, requirement_string):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        try:
            super(Requirement, self).__init__(requirement_string)
        except packaging.requirements.InvalidRequirement as e:
            raise RequirementParseError(str(e))
        self.unsafe_name = self.name
        project_name = safe_name(self.name)
        self.project_name, self.key = project_name, project_name.lower()
        self.specs = [
            (spec.operator, spec.version) for spec in self.specifier]
        self.extras = tuple(map(safe_extra, self.extras))
        self.hashCmp = (
            self.key,
            self.url,
            self.specifier,
            frozenset(self.extras),
            str(self.marker) if self.marker else None,
        )
        self.__hash = hash(self.hashCmp)

    def __eq__(self, other):
        return (
            isinstance(other, Requirement) and
            self.hashCmp == other.hashCmp
        )

    def __ne__(self, other):
        return not self == other

    def __contains__(self, item):
        if isinstance(item, Distribution):
            if item.key != self.key:
                return False

            item = item.version

        # Allow prereleases always in order to match the previous behavior of
        # this method. In the future this should be smarter and follow PEP 440
        # more accurately.
        return self.specifier.contains(item, prereleases=True)

    def __hash__(self):
        return self.__hash

    def __repr__(self):
        return "Requirement.parse(%r)" % str(self)

    @staticmethod
    def parse(s):
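        # For example (an illustrative specifier):
        #     req = Requirement.parse('foo[extra]>=1.0,<2.0')
        #     req.project_name -> 'foo'; req.extras -> ('extra',)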
        req, = parse_requirements(s)
        return req


def _always_object(classes):
    """
    Ensure object appears in the mro even
    for old-style classes.
    """
    if object not in classes:
        return classes + (object,)
    return classes


def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
    for t in types:
        if t in registry:
            return registry[t]


def ensure_directory(path):
    """Ensure that the parent directory of `path` exists"""
    dirname = os.path.dirname(path)
    py31compat.makedirs(dirname, exist_ok=True)


def _bypass_ensure_directory(path):
    """Sandbox-bypassing version of ensure_directory()"""
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(path)
    if dirname and filename and not isdir(dirname):
        _bypass_ensure_directory(dirname)
        try:
            mkdir(dirname, 0o755)
        except FileExistsError:
            pass


def split_sections(s):
    """Split a string or iterable thereof into (section, content) pairs

    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines.  If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
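
    For example (an illustrative entry-points snippet)::

        list(split_sections(['[console_scripts]', 'foo = bar:baz']))
        # -> [('console_scripts', ['foo = bar:baz'])]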
    """
    section = None
    content = []
    for line in yield_lines(s):
        if line.startswith("["):
            if line.endswith("]"):
                if section or content:
                    yield section, content
                section = line[1:-1].strip()
                content = []
            else:
                raise ValueError("Invalid section heading", line)
        else:
            content.append(line)

    # wrap up last segment
    yield section, content


def _mkstemp(*args, **kw):
    old_open = os.open
    try:
        # temporarily bypass sandboxing
        os.open = os_open
        return tempfile.mkstemp(*args, **kw)
    finally:
        # and then put it back
        os.open = old_open


# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)


# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
    f(*args, **kwargs)
    return f


@_call_aside
def _initialize(g=globals()):
    "Set up global resource manager (deliberately not state-saved)"
    manager = ResourceManager()
    g['_manager'] = manager
    g.update(
        (name, getattr(manager, name))
        for name in dir(manager)
        if not name.startswith('_')
    )


@_call_aside
def _initialize_master_working_set():
    """
    Prepare the master working set and make the ``require()``
    API available.

    This function has explicit effects on the global state
    of pkg_resources. It is intended to be invoked once at
    the initialization of this module.

    Invocation by other packages is unsupported and done
    at their own risk.
    """
    working_set = WorkingSet._build_master()
    _declare_state('object', working_set=working_set)

    require = working_set.require
    iter_entry_points = working_set.iter_entry_points
    add_activation_listener = working_set.subscribe
    run_script = working_set.run_script
    # backward compatibility
    run_main = run_script
    # Activate all distributions already on sys.path with replace=False and
    # ensure that all distributions added to the working set in the future
    # (e.g. by calling ``require()``) will get activated as well,
    # with higher priority (replace=True).
    tuple(
        dist.activate(replace=False)
        for dist in working_set
    )
    add_activation_listener(
        lambda dist: dist.activate(replace=True),
        existing=False,
    )
    working_set.entries = []
    # re-add entries in sys.path order
    list(map(working_set.add_entry, sys.path))
    globals().update(locals())

class PkgResourcesDeprecationWarning(Warning):
    """
    Base class for warning about deprecations in ``pkg_resources``

    This class is not derived from ``DeprecationWarning``, and as such is
    visible by default.
    """
# ---- py31compat.py ----
import os
import errno
import sys

from .extern import six


def _makedirs_31(path, exist_ok=False):
    try:
        os.makedirs(path)
    except OSError as exc:
        if not exist_ok or exc.errno != errno.EEXIST:
            raise


# rely on compatibility behavior until mode considerations
#  and exist_ok considerations are disentangled.
# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663
needs_makedirs = (
    six.PY2 or
    (3, 4) <= sys.version_info < (3, 4, 1)
)
makedirs = _makedirs_31 if needs_makedirs else os.makedirs
# ---- extern/__init__.py ----
import sys


class VendorImporter:
    """
    A PEP 302 meta path importer for finding optionally-vendored
    or otherwise naturally-installed packages from root_name.
    """

    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
        self.root_name = root_name
        self.vendored_names = set(vendored_names)
        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')

    @property
    def search_path(self):
        """
        Search first the vendor package, then fall back to the
        naturally-installed package.
        """
        yield self.vendor_pkg + '.'
        yield ''

    def find_module(self, fullname, path=None):
        """
        Return self when fullname starts with root_name and the
        target module is one vendored through this importer.
        """
        root, base, target = fullname.partition(self.root_name + '.')
        if root:
            return
        if not any(map(target.startswith, self.vendored_names)):
            return
        return self

    def load_module(self, fullname):
        """
        Iterate over the search path to locate and load fullname.
        """
        root, base, target = fullname.partition(self.root_name + '.')
        for prefix in self.search_path:
            try:
                extant = prefix + target
                __import__(extant)
                mod = sys.modules[extant]
                sys.modules[fullname] = mod
                # mysterious hack:
                # Remove the reference to the extant package/module
                # on later Python versions to cause relative imports
                # in the vendor package to resolve the same modules
                # as those going through this importer.
                if prefix and sys.version_info > (3, 3):
                    del sys.modules[extant]
                return mod
            except ImportError:
                pass
        else:
            raise ImportError(
                "The '{target}' package is required; "
                "normally this is bundled with this package so if you get "
                "this error, consult the packager of your "
                "distribution.".format(**locals())
            )

    def install(self):
        """
        Install this importer into sys.meta_path if not already present.
        """
        if self not in sys.meta_path:
            sys.meta_path.append(self)


names = 'packaging', 'pyparsing', 'six', 'appdirs'
VendorImporter(__name__, names).install()
Z
ddlZddlZddl
Z
ddlZddlZddlZddlZddlZddlZddlZddlZddlZddlZddlZddlZddlZddlmZzddlZWnek
�rddlZYnXze Wne!k
�r*e"Z YnXddl#m$Z$ddl%m&Z&m'Z'm(Z(ddlm)Z)zddlm*Z*m+Z+m,Z,d	Z-Wnek
�r�d
Z-YnXddlm.Z/ddl0m1Z1m2Z2zddl3m4Z5e5j6Wnek
�r�dZ5YnXd
dl7m8Z8ddl#m9Z9ddl#m:Z:e;d�e;d�e;d�e;d�e<Z=dej>k�r@dk�rLnne?d��e$j@�r\dZAdZBdZCdZDdZEdZFdZGdZHdZIdZJdZKdZLdZMdZNdZOdZPdZQdZRdZSGdd�deT�ZUdd�ZViZWdd�ZXdd�ZYd d!�ZZd"d#�Z[d$d%�Z\d&d'�Z]d(d)�Z^d*d+�Z_Z`d,d-�Zad.d/d0d1d2d3d4d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDdEdFdGdHdIdJdKdLdMddNddOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcdddedfdgdhdidjdkdldmdndodpdqdrgGZbGdsdI�dIec�ZdGdtdJ�dJed�ZeGdudv�dvee�ZfGdwdK�dKed�ZgGdxdL�dLed�ZhiZidyjjej>�ZkdzZld{Zmd
ZndZod|Zpd}dm�Zqd~d0�Zrgfdd��Zsd�d��Ztd�d��Zue�vd��Zwe�vd��ZxeuZyd�dR�Zzd�d/�Z{e{Z|d�d1�Z}d�d2�Z~�dd�d3�Zd�d4�Z�Gd�d`�d`�Z�Gd�da�dae��Z�Gd�dD�dD�Z�Gd�d��d�e��Z�Gd�dC�dC�Z�e�Z�Gd�dM�dMe?�Z�Gd�dE�dE�Z�d�dB�Z�d�dO�Z�d�dP�Z�d�dU�Z�d�dV�Z�d�dW�Z��dd�dX�Z�Gd�dg�dg�Z�eqe�e��Gd�dh�dhe��Z�Gd�di�die��Z�e����Gd�de�dee��Z�e��Z�Gd�d��d�e��Z�Gd�d��d�e��Z�Gd�dj�dje��Z�eqe
j�e��Gd�db�dbe��Z�Gd�dc�dce��Z�Gd�dd�dde��Z�eXd�id��d�dk�Z��dd�d?�Z��dd�d��Z�e�e
j�e���dd�d��Z�e�e�e��d�d��Z��dd�d��Z�d�d��Z�Gd�d��d��Z�d�d��Z�d�d��Z�d�d��Z�d�d��Z�e�ej�e��e�e5d���r�e�e5j�e��eXd�id��eXd�idd�dl�Z�d�dńZ�d�dDŽZ�d�d<�Z��dd�dn�Z�d�d˄Z�e�ej�e��e�e
j�e��e�e5d���r,e�e5j�e��d�d̈́Z�e�e�e��d�dZ�Z�d�dЄZ�ifd�d҄Z�d�dԄZ�d�dքZ�d�d؄Z�d�dS�Z�e�vdڡj�Z�e�vd�ej�ej�B�j�Z�Gd�dH�dH�Z�d�dބZ�d�d�Z�Gd�dF�dF�Z�Gd�d�d�eăZ�Gd�d�d�eăZ�e�e�e�d�Z�d�d�Z�Gd�d�d�eɃZ�d�dN�Z�Gd�dG�dGe:j�j̓Z�d�d�Z�d�d�Z�d�dY�Z�d�d�Z�d�dT�Z�d�d��Z�ej�d�eUd	d��d�d��Z�e�eփfd�d���Z�e�d�d���Z�Gd�dp�dpeكZ�dS(aZ
Package resource API
--------------------

A resource is a logical file contained within a package, or a logical
subdirectory thereof.  The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is.  Do not use os.path operations to manipulate resource
names being passed into the API.

The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files.  It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
�)�absolute_importN)�get_importer)�six)�urllib�map�filter)�utime)�mkdir�rename�unlinkTF)�open)�isdir�split�)�
py31compat)�appdirs)�	packagingz&pkg_resources.extern.packaging.versionz)pkg_resources.extern.packaging.specifiersz+pkg_resources.extern.packaging.requirementsz&pkg_resources.extern.packaging.markers)�r)r�zPython 3.4 or later is requiredc@seZdZdZdS)�
PEP440Warningza
    Used when there is an issue with a version or specifier not complying with
    PEP 440.
    N��__name__�
__module__�__qualname__�__doc__�rr�:/usr/lib/python3.8/site-packages/pkg_resources/__init__.pyrxsrcCs8ztj�|�WStjjk
r2tj�|�YSXdS�N)r�version�Version�InvalidVersion�
LegacyVersion)�vrrr�
parse_versionsr#cKs"t��|�t�t�||��dSr)�globals�update�_state_vars�dict�fromkeys)Zvartype�kwrrr�_declare_state�sr*cCs8i}t�}t��D] \}}|d|||�||<q|S)NZ_sget_)r$r&�items��state�g�kr"rrr�__getstate__�s
r0cCs8t�}|��D]$\}}|dt|||||�q|S)NZ_sset_)r$r+r&r,rrr�__setstate__�sr1cCs|��Sr)�copy��valrrr�
_sget_dict�sr5cCs|��|�|�dSr)�clearr%��key�obr-rrr�
_sset_dict�sr:cCs|��Sr)r0r3rrr�_sget_object�sr;cCs|�|�dSr)r1r7rrr�_sset_object�sr<cGsdSrr��argsrrr�<lambda>��r?cCsbt�}t�|�}|dk	r^tjdkr^z&dd�t�dd��|�d�f}Wntk
r\YnX|S)aZReturn this platform's maximum compatible version.

    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils.  But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*.  To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.

    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    N�darwinzmacosx-%s-%s�.�r)	�get_build_platform�macosVersionString�match�sys�platform�join�_macosx_vers�group�
ValueError)�plat�mrrr�get_supported_platform�s

&rO�require�
run_script�get_provider�get_distribution�load_entry_point�
get_entry_map�get_entry_info�iter_entry_points�resource_string�resource_stream�resource_filename�resource_listdir�resource_exists�resource_isdir�declare_namespace�working_set�add_activation_listener�find_distributions�set_extraction_path�cleanup_resources�get_default_cache�Environment�
WorkingSet�ResourceManager�Distribution�Requirement�
EntryPoint�ResolutionError�VersionConflict�DistributionNotFound�UnknownExtra�ExtractionError�parse_requirements�	safe_name�safe_version�get_platform�compatible_platforms�yield_lines�split_sections�
safe_extra�to_filename�invalid_marker�evaluate_marker�ensure_directory�normalize_path�EGG_DIST�BINARY_DIST�SOURCE_DIST�
CHECKOUT_DIST�DEVELOP_DIST�IMetadataProvider�IResourceProvider�FileMetadata�PathMetadata�EggMetadata�
EmptyProvider�empty_provider�NullProvider�EggProvider�DefaultProvider�ZipProvider�register_finder�register_namespace_handler�register_loader_type�fixup_namespace_packagesr�PkgResourcesDeprecationWarning�run_main�AvailableDistributionsc@seZdZdZdd�ZdS)rkz.Abstract base for dependency resolution errorscCs|jjt|j�Sr)�	__class__r�reprr>��selfrrr�__repr__�szResolutionError.__repr__N)rrrrr�rrrrrk�sc@s<eZdZdZdZedd��Zedd��Zdd�Zd	d
�Z	dS)rlz�
    An already-installed version conflicts with the requested version.

    Should be initialized with the installed Distribution and the requested
    Requirement.
    z3{self.dist} is installed but {self.req} is requiredcCs
|jdS�Nrr=r�rrr�distszVersionConflict.distcCs
|jdS�Nrr=r�rrr�reqszVersionConflict.reqcCs|jjft��Sr��	_template�format�localsr�rrr�reportszVersionConflict.reportcCs|s|S|j|f}t|�S)zt
        If required_by is non-empty, return a version of self that is a
        ContextualVersionConflict.
        )r>�ContextualVersionConflict)r��required_byr>rrr�with_contextszVersionConflict.with_contextN)
rrrrr��propertyr�r�r�r�rrrrrls

c@s&eZdZdZejdZedd��ZdS)r�z�
    A VersionConflict that accepts a third parameter, the set of the
    requirements that required the installed Distribution.
    z by {self.required_by}cCs
|jdS)NrCr=r�rrrr�*sz%ContextualVersionConflict.required_byN)rrrrrlr�r�r�rrrrr�"s
r�c@sHeZdZdZdZedd��Zedd��Zedd��Zd	d
�Z	dd�Z
d
S)rmz&A requested distribution was not foundzSThe '{self.req}' distribution was not found and is required by {self.requirers_str}cCs
|jdSr�r=r�rrrr�5szDistributionNotFound.reqcCs
|jdSr�r=r�rrr�	requirers9szDistributionNotFound.requirerscCs|js
dSd�|j�S)Nzthe applicationz, )r�rIr�rrr�
requirers_str=sz"DistributionNotFound.requirers_strcCs|jjft��Srr�r�rrrr�CszDistributionNotFound.reportcCs|��Sr)r�r�rrr�__str__FszDistributionNotFound.__str__N)rrrrr�r�r�r�r�r�r�rrrrrm/s


c@seZdZdZdS)rnz>Distribution doesn't have an "extra feature" of the given nameNrrrrrrnJsz{}.{}rrC���cCs|t|<dS)aRegister `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    N)�_provider_factories)Zloader_typeZprovider_factoryrrrr�XscCstt|t�r$t�|�p"tt|��dSztj|}Wn&tk
rXt	|�tj|}YnXt
|dd�}tt|�|�S)z?Return an IResourceProvider for the named module or requirementr�
__loader__N)
�
isinstancerir_�findrP�strrG�modules�KeyError�
__import__�getattr�
_find_adapterr�)ZmoduleOrReq�module�loaderrrrrRbs
cCsd|s\t��d}|dkrLd}tj�|�rLttd�rLt�|�}d|krL|d}|�|�	d��|dS)Nr�z0/System/Library/CoreServices/SystemVersion.plist�	readPlistZProductVersionrB)
rHZmac_ver�os�path�exists�hasattr�plistlibr��appendr)�_cacherZplistZ
plist_contentrrrrJos

rJcCsddd��||�S)NZppc)ZPowerPCZPower_Macintosh)�get)�machinerrr�_macosx_archsr�cCs~ddlm}|�}tjdkrz|�d�szz>t�}t��d�dd�}dt	|d�t	|d	�t
|�fWStk
rxYnX|S)
z�Return this platform's string for platform-specific distributions

    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    r)rsrAzmacosx-r� �_zmacosx-%d.%d-%sr)Z	sysconfigrsrGrH�
startswithrJr��uname�replace�intr�rL)rsrMrr�rrrrD�s

�rDzmacosx-(\d+)\.(\d+)-(.*)zdarwin-(\d+)\.(\d+)\.(\d+)-(.*)cCs�|dks|dks||krdSt�|�}|r�t�|�}|s�t�|�}|r�t|�d��}d|�d�|�d�f}|dkr||dks�|dkr�|d	kr�dSd
S|�d�|�d�ks�|�d�|�d�kr�d
St|�d��t|�d��kr�d
SdSd
S)z�Can code for the `provided` platform run on the `required` platform?

    Returns true if either platform is ``None``, or the platforms are equal.

    XXX Needs compatibility checks for Linux and other unixy OSes.
    NTrz%s.%srC�z10.3�z10.4Fr)rErF�darwinVersionStringr�rK)ZprovidedZrequiredZreqMacZprovMacZ
provDarwinZdversionZmacosversionrrrrt�s2


���cCs<t�d�j}|d}|��||d<t|�d�||�dS)z@Locate distribution `dist_spec` and run its `script_name` scriptrrrN�rG�	_getframe�	f_globalsr6rPrQ)Z	dist_spec�script_name�ns�namerrrrQ�s
cCs@t|tj�rt�|�}t|t�r(t|�}t|t�s<td|��|S)z@Return a current distribution object for a Requirement or stringz-Expected string, Requirement, or Distribution)r�r�string_typesri�parserRrh�	TypeError�r�rrrrS�s



cCst|��||�S)zDReturn `name` entry point of `group` for `dist` or raise ImportError)rSrT�r�rKr�rrrrT�scCst|��|�S)�=Return the entry point map for `group`, or the full entry map)rSrU)r�rKrrrrU�scCst|��||�S�z<Return the EntryPoint object for `group`+`name`, or ``None``)rSrVr�rrrrV�sc@s<eZdZdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
S)r�cCsdS)z;Does the package's distribution contain the named metadata?Nr�r�rrr�has_metadata�szIMetadataProvider.has_metadatacCsdS)z'The named metadata resource as a stringNrr�rrr�get_metadata�szIMetadataProvider.get_metadatacCsdS)z�Yield named metadata resource as list of non-blank non-comment lines

       Leading and trailing whitespace is stripped from each line, and lines
       with ``#`` as the first non-blank character are omitted.Nrr�rrr�get_metadata_lines�sz$IMetadataProvider.get_metadata_linescCsdS)z>Is the named metadata a directory?  (like ``os.path.isdir()``)Nrr�rrr�metadata_isdirsz IMetadataProvider.metadata_isdircCsdS)z?List of metadata names in the directory (like ``os.listdir()``)Nrr�rrr�metadata_listdirsz"IMetadataProvider.metadata_listdircCsdS)z=Execute the named script in the supplied namespace dictionaryNr)r��	namespacerrrrQ	szIMetadataProvider.run_scriptN)	rrrr�r�r�r�r�rQrrrrr��sc@s@eZdZdZdd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Z	dS)r�z3An object that provides access to package resourcescCsdS)zdReturn a true filesystem path for `resource_name`

        `manager` must be an ``IResourceManager``Nr��manager�
resource_namerrr�get_resource_filenamesz'IResourceProvider.get_resource_filenamecCsdS)ziReturn a readable file-like object for `resource_name`

        `manager` must be an ``IResourceManager``Nrr�rrr�get_resource_streamsz%IResourceProvider.get_resource_streamcCsdS)zmReturn a string containing the contents of `resource_name`

        `manager` must be an ``IResourceManager``Nrr�rrr�get_resource_stringsz%IResourceProvider.get_resource_stringcCsdS)z,Does the package contain the named resource?Nr�r�rrr�has_resourceszIResourceProvider.has_resourcecCsdS)z>Is the named resource a directory?  (like ``os.path.isdir()``)Nrr�rrrr]"sz IResourceProvider.resource_isdircCsdS)z?List of resource names in the directory (like ``os.listdir()``)Nrr�rrrr[%sz"IResourceProvider.resource_listdirN)
rrrrr�r�r�r�r]r[rrrrr�
sc@s�eZdZdZd'dd�Zedd��Zedd��Zd	d
�Zdd�Z	d
d�Z
d(dd�Zdd�Zdd�Z
d)dd�Zd*dd�Zd+dd�Zdd�Zd,dd �Zd!d"�Zd#d$�Zd%d&�ZdS)-rfzDA collection of active distributions on sys.path (or a similar list)NcCs>g|_i|_i|_g|_|dkr&tj}|D]}|�|�q*dS)z?Create working set from list of path entries (default=sys.path)N)�entries�
entry_keys�by_key�	callbacksrGr��	add_entry)r�r��entryrrr�__init__,szWorkingSet.__init__cCsb|�}zddlm}Wntk
r.|YSXz|�|�Wntk
r\|�|�YSX|S)z1
        Prepare the master working set.
        r)�__requires__)�__main__r��ImportErrorrPrl�_build_from_requirements)�cls�wsr�rrr�
_build_master9s
zWorkingSet._build_mastercCsf|g�}t|�}|�|t��}|D]}|�|�q"tjD]}||jkr8|�|�q8|jtjdd�<|S)zQ
        Build a working set from a requirement spec. Rewrites sys.path.
        N)rp�resolvere�addrGr�r�r�)r�Zreq_specr��reqs�distsr�r�rrrr�Ms

z#WorkingSet._build_from_requirementscCs<|j�|g�|j�|�t|d�D]}|�||d�q$dS)a�Add a path item to ``.entries``, finding any distributions on it

        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added.  `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        TFN)r��
setdefaultr�r�rar�)r�r�r�rrrr�cs
zWorkingSet.add_entrycCs|j�|j�|kS)z9True if `dist` is the active distribution for its project)r�r�r8�r�r�rrr�__contains__rszWorkingSet.__contains__cCs,|j�|j�}|dk	r(||kr(t||��|S)a�Find a distribution matching requirement `req`

        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`.  But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        N)r�r�r8rl)r�r�r�rrrr�vs

zWorkingSet.findcs��fdd�|D�S)aYield entry point objects from `group` matching `name`

        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        c3s8|]0}|�����D]}�dks*�|jkr|VqqdSr)rU�valuesr�)�.0r�r��rKr�rr�	<genexpr>�s
�z/WorkingSet.iter_entry_points.<locals>.<genexpr>r�r�rKr�rr�rrW�s�zWorkingSet.iter_entry_pointscCs>t�d�j}|d}|��||d<|�|�d�||�dS)z?Locate distribution for `requires` and run `script_name` scriptrrrNr�)r��requiresr�r�r�rrrrQ�s
zWorkingSet.run_scriptccsLi}|jD]<}||jkrq
|j|D] }||kr$d||<|j|Vq$q
dS)z�Yield distributions for non-duplicate projects in the working set

        The yield order is the order in which the items' path entries were
        added to the working set.
        rN)r�r�r�)r��seen�itemr8rrr�__iter__�s

zWorkingSet.__iter__TFcCs�|r|j|j||d�|dkr$|j}|j�|g�}|j�|jg�}|sV|j|jkrVdS||j|j<|j|krx|�|j�|j|kr�|�|j�|�|�dS)aAdd `dist` to working set, associated with `entry`

        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).

        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        �r�N)	�	insert_onr��locationr�r�r8r�r��
_added_new)r�r�r��insertr��keysZkeys2rrrr��s

zWorkingSet.addcCsxt|�ddd�}i}i}g}t�}	t�t�}
|�rt|�d�}||krHq.|	�||�sVq.|�|j�}|dk�r|j	�|j�}|dks�||kr�|r�|}
|dkr�|dkr�t
|j�}nt
g�}tg�}
|j
||
||d�}||j<|dkr�|
�|d�}t||��|�|�||k�r$|
|}t||��|��|�|j�ddd�}|�|�|D] }|
|�|j�|j|	|<�qHd||<q.|S)a�List all distributions needed to (recursively) meet `requirements`

        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
        if supplied, should be an ``Environment`` instance.  If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set.  `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.

        Unless `replace_conflicting=True`, raises a VersionConflict exception
        if
        any requirements are found on the path that have the correct name but
        the wrong version.  Otherwise, if an `installer` is supplied it will be
        invoked to obtain the correct version of the requirement and activate
        it.

        `extras` is a list of the extras to be used with these requirements.
        This is important because extra requirements may look like `my_req;
        extra = "my_extra"`, which would otherwise be interpreted as a purely
        optional requirement.  Instead, we want to be able to assert that these
        requirements are truly required.
        Nr�r)�replace_conflictingT)�list�
_ReqExtras�collections�defaultdict�set�pop�markers_passr�r8r�rer�rf�
best_matchrmr�rlr�r�extras�extendr��project_name)r��requirements�env�	installerrrZ	processedZbestZto_activateZ
req_extrasr�r�r�r�r�Z
dependent_reqZnew_requirementsZnew_requirementrrrr��sT


�




zWorkingSet.resolvecCs
t|�}|��i}i}|dkr4t|j�}||7}n||}|�g�}	tt|	j|��|D]�}
||
D]�}|��g}z|	�|||�}
WnBt	k
r�}z$|||<|r�WY�qfn
WY�qZW5d}~XYqfXtt|	j|
��|�
t�|
��qZqfqZt|�}|��||fS)asFind all activatable distributions in `plugin_env`

        Example usage::

            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            # add plugins+libs to sys.path
            map(working_set.add, distributions)
            # display errors
            print('Could not load', errors)

        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions.  If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.

        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.

        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies.  `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        N)
r�sortrer�r�rr��as_requirementr�rkr%r'r()r�Z
plugin_envZfull_envrZfallbackZplugin_projectsZ
error_infoZ
distributionsrZ
shadow_setrr�r�Z	resolveesr"rrr�find_plugins's4$




zWorkingSet.find_pluginscGs&|�t|��}|D]}|�|�q|S)a�Ensure that distributions matching `requirements` are activated

        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required.  The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        )r�rpr�)r�rZneededr�rrrrP{s	zWorkingSet.requirecCs8||jkrdS|j�|�|s"dS|D]}||�q&dS)z�Invoke `callback` for all distributions

        If `existing=True` (default),
        call on all existing ones, as well.
        N)r�r�)r��callback�existingr�rrr�	subscribe�s
zWorkingSet.subscribecCs|jD]}||�qdSr)r�)r�r�rrrrr�s
zWorkingSet._added_newcCs,|jdd�|j��|j��|jdd�fSr)r�r�r2r�r�r�rrrr0�s
�zWorkingSet.__getstate__cCs@|\}}}}|dd�|_|��|_|��|_|dd�|_dSr)r�r2r�r�r�)r�Ze_k_b_cr�r
r�r�rrrr1�s


zWorkingSet.__setstate__)N)N)NTF)NNFN)NNT)T)rrrrr��classmethodr�r�r�r�r�rWrQrr�r�rrPrrr0r1rrrrrf)s4





�
]�
T
c@seZdZdZddd�ZdS)r
z>
    Map each requirement to the extras that demanded it.
    Ncs2�fdd�|��d�|pdD�}�jp0t|�S)z�
        Evaluate markers for req against each extra that
        demanded it.

        Return False if the req has a marker and fails
        evaluation. Otherwise, return True.
        c3s|]}�j�d|i�VqdS)�extraN��marker�evaluate)r�r!�r�rrr��s�z*_ReqExtras.markers_pass.<locals>.<genexpr>rr)r�r#�any)r�r�rZextra_evalsrr%rr�s
�z_ReqExtras.markers_pass)N)rrrrrrrrrr
�sr
c@sxeZdZdZde�efdd�Zdd�Zdd�Zdd	d
�Z	dd�Z
d
d�Zddd�Zddd�Z
dd�Zdd�Zdd�ZdS)rez5Searchable snapshot of distributions on a search pathNcCs i|_||_||_|�|�dS)a!Snapshot distributions available on a search path

        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.

        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with.  If
        unspecified, it defaults to the current platform.  `python` is an
        optional string naming the desired version of Python (e.g. ``'3.6'``);
        it defaults to the current version.

        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        N)�_distmaprH�python�scan)r��search_pathrHr(rrrr��szEnvironment.__init__cCs2|jdkp|jdkp|j|jk}|o0t|j|j�S)z�Is distribution `dist` acceptable for this environment?

        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        N)r(�
py_versionrtrH)r�r�Z	py_compatrrr�can_add�s
�
�zEnvironment.can_addcCs|j|j�|�dS)z"Remove `dist` from the environmentN)r'r8�remover�rrrr-�szEnvironment.removecCs4|dkrtj}|D]}t|�D]}|�|�qqdS)adScan `search_path` for distributions usable in this environment

        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.  Only distributions conforming to
        the platform/python version defined at initialization are added.
        N)rGr�rar�)r�r*rr�rrrr)�s
zEnvironment.scancCs|��}|j�|g�S)aReturn a newest-to-oldest list of distributions for `project_name`

        Uses case-insensitive `project_name` comparison, assuming all the
        project's distributions use their project's name converted to all
        lowercase as their key.

        )�lowerr'r�)r�rZdistribution_keyrrr�__getitem__�szEnvironment.__getitem__cCsL|�|�rH|��rH|j�|jg�}||krH|�|�|jt�d�dd�dS)zLAdd `dist` if we ``can_add()`` it and it has not already been added
        �hashcmpT�r8�reverseN)	r,�has_versionr'r�r8r�r�operator�
attrgetter)r�r�r�rrrr�s

zEnvironment.addFcCsfz|�|�}Wntk
r,|s$�d}YnX|dk	r:|S||jD]}||krD|SqD|�||�S)a�Find distribution best matching `req` and usable on `working_set`

        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active.  (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.)  If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`.  If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        N)r�rlr8�obtain)r�r�r_rrr�rrrrs

zEnvironment.best_matchcCs|dk	r||�SdS)a�Obtain a distribution matching `requirement` (e.g. via download)

        Obtain a distro that matches requirement (e.g. via download).  In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead.  This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument.Nr)r�Zrequirementrrrrr6+s	zEnvironment.obtainccs"|j��D]}||r
|Vq
dS)z=Yield the unique project names of the available distributionsN)r'r
�r�r8rrrr7szEnvironment.__iter__cCsVt|t�r|�|�n<t|t�rD|D]}||D]}|�|�q0q$ntd|f��|S)z2In-place addition of a distribution or environmentzCan't add %r to environment)r�rhr�rer�)r��otherZprojectr�rrr�__iadd__=s

zEnvironment.__iadd__cCs*|jgddd�}||fD]}||7}q|S)z4Add an environment or distribution to an environmentN)rHr(�r�)r�r8�newrrrr�__add__Is
zEnvironment.__add__)N)NF)N)rrrrrO�PY_MAJORr�r,r-r)r/r�rr6rr9r<rrrrre�s"�


�

c@seZdZdZdS)roaTAn error occurred extracting a resource

    The following attributes are available from instances of this exception:

    manager
        The resource manager that raised this exception

    cache_path
        The base directory for resource extraction

    original_error
        The exception instance that caused extraction to fail
    NrrrrrroUsc@s�eZdZdZdZdd�Zdd�Zdd�Zd	d
�Zdd�Z	d
d�Z
dd�Zdd�Zddd�Z
edd��Zdd�Zdd�Zd dd�ZdS)!rgz'Manage resource extraction and packagesNcCs
i|_dSr)�cached_filesr�rrrr�iszResourceManager.__init__cCst|��|�S)zDoes the named resource exist?)rRr��r�Zpackage_or_requirementr�rrrr\lszResourceManager.resource_existscCst|��|�S)z,Is the named resource an existing directory?)rRr]r?rrrr]ps�zResourceManager.resource_isdircCst|��||�S)z4Return a true filesystem path for specified resource)rRr�r?rrrrZvs�z!ResourceManager.resource_filenamecCst|��||�S)z9Return a readable file-like object for specified resource)rRr�r?rrrrY|s�zResourceManager.resource_streamcCst|��||�S)z%Return specified resource as a string)rRr�r?rrrrX�s�zResourceManager.resource_stringcCst|��|�S)z1List the contents of the named resource directory)rRr[r?rrrr[�s�z ResourceManager.resource_listdircCsRt��d}|jpt�}t�d���}t|jft	���}||_
||_||_|�dS)z5Give an error message for problems extracting file(s)ra
            Can't extract file(s) to egg cache

            The following error occurred while trying to extract file(s)
            to the Python egg cache:

              {old_exc}

            The Python egg cache directory is currently set to:

              {cache_path}

            Perhaps your account does not have write access to this directory?
            You can change the cache directory by setting the PYTHON_EGG_CACHE
            environment variable to point to an accessible directory.
            N)
rG�exc_info�extraction_pathrd�textwrap�dedent�lstripror�r�r��
cache_pathZoriginal_error)r��old_excrE�tmpl�errrrr�extraction_error�sz ResourceManager.extraction_errorrcCsf|jp
t�}tjj||df|��}zt|�Wntk
rL|��YnX|�|�d|j	|<|S)a�Return absolute location in cache for `archive_name` and `names`

        The parent directory of the resulting path will be created if it does
        not already exist.  `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension.  `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.

        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        z-tmpr)
rArdr�r�rI�_bypass_ensure_directory�	ExceptionrI�_warn_unsafe_extraction_pathr>)r�Zarchive_name�namesZextract_pathZtarget_pathrrr�get_cache_path�s


zResourceManager.get_cache_pathcCsVtjdkr|�tjd�sdSt�|�j}|tj@s>|tj@rRd|}t�	|t
�dS)aN
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.

        See Distribute #375 for more details.
        �ntZwindirNz�%s is writable by group/others and vulnerable to attack when used with get_resource_filename. Consider a more secure location (set with .set_extraction_path or the PYTHON_EGG_CACHE environment variable).)r�r�r��environ�stat�st_mode�S_IWOTH�S_IWGRP�warnings�warn�UserWarning)r��mode�msgrrrrL�s
��z,ResourceManager._warn_unsafe_extraction_pathcCs.tjdkr*t�|�jdBd@}t�||�dS)a4Perform any platform-specific postprocessing of `tempname`

        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.

        Resource providers should call this method ONLY after successfully
        extracting a compressed resource.  They must NOT call it on resources
        that are already in the filesystem.

        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        �posiximi�N)r�r�rQrR�chmod)r�Ztempname�filenamerXrrr�postprocess�s
zResourceManager.postprocesscCs|jrtd��||_dS)a�Set the base path where resources will be extracted to, if needed.

        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``.  (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks.  See that routine's documentation for more
        details.)

        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``.  You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done.  There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.

        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        z5Can't change extraction path, files already extractedN)r>rLrA�r�r�rrrrb�s
�z#ResourceManager.set_extraction_pathFcCsdS)aB
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process.  This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        Nr)r��forcerrrrcsz!ResourceManager.cleanup_resources)r)F)rrrrrAr�r\r]rZrYrXr[rIrN�staticmethodrLr]rbrcrrrrrges 

cCstj�d�ptjdd�S)z�
    Return the ``PYTHON_EGG_CACHE`` environment variable
    or a platform-relevant user cache dir for an app
    named "Python-Eggs".
    ZPYTHON_EGG_CACHEzPython-Eggs)Zappname)r�rPr�rZuser_cache_dirrrrrrds
�cCst�dd|�S)z�Convert an arbitrary string to a standard distribution name

    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    �[^A-Za-z0-9.]+�-)�re�subr�rrrrq%scCsJzttj�|��WStjjk
rD|�dd�}t�dd|�YSXdS)zB
    Convert an arbitrary string to a standard version string
    r�rBrarbN)r�rrrr r�rcrd)rrrrrr-s
cCst�dd|���S)z�Convert an arbitrary string to a standard 'extra' name

    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    z[^A-Za-z0-9.-]+r�)rcrdr.)r!rrrrw9scCs|�dd�S)z|Convert a project or version name to its filename-escaped form

    Any '-' characters are currently replaced with '_'.
    rbr�rr�rrrrxBsc
CsHzt|�Wn6tk
rB}zd|_d|_|WY�Sd}~XYnXdS)zo
    Validate text as a PEP 508 environment marker; return an exception
    if invalid or False otherwise.
    NF)rz�SyntaxErrorr\�lineno)�text�errrryJsc
CsJztj�|�}|��WStjjk
rD}zt|��W5d}~XYnXdS)z�
    Evaluate a PEP 508 environment marker.
    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.

    This implementation uses the 'pyparsing' module.
    N)rZmarkersZMarkerr$Z
InvalidMarkerre)rgr!r#rhrrrrzXs

c@s�eZdZdZdZdZdZdd�Zdd�Zdd�Z	d	d
�Z
dd�Zd
d�Zdd�Z
dd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd �Zd!d"�Zd#d$�Zd%d&�Zed'd(��Zd)d*�ZdS)+r�zETry to implement resources and metadata for arbitrary PEP 302 loadersNcCs(t|dd�|_tj�t|dd��|_dS)Nr��__file__r�)r�r�r�r��dirname�module_path�r�r�rrrr�nszNullProvider.__init__cCs|�|j|�Sr)�_fnrk�r�r�r�rrrr�rsz"NullProvider.get_resource_filenamecCst�|�||��Sr)�io�BytesIOr�rnrrrr�usz NullProvider.get_resource_streamcCs|�|�|j|��Sr)�_getrmrkrnrrrr�xsz NullProvider.get_resource_stringcCs|�|�|j|��Sr)�_hasrmrk�r�r�rrrr�{szNullProvider.has_resourcecCs|�|j|�Sr)rm�egg_info�r�r�rrr�_get_metadata_path~szNullProvider._get_metadata_pathcCs |js|jS|�|�}|�|�Sr)rtrvrr�r�r�r�rrrr��s
zNullProvider.has_metadatac
Cst|js
dS|�|�}|�|�}tjr(|Sz|�d�WStk
rn}z|jd�||�7_�W5d}~XYnXdS)Nr��utf-8z in {} file at path: {})	rtrvrqr�PY2�decode�UnicodeDecodeError�reasonr�)r�r�r��value�excrrrr��s

zNullProvider.get_metadatacCst|�|��Sr�rur�rurrrr��szNullProvider.get_metadata_linescCs|�|�|j|��Sr)�_isdirrmrkrsrrrr]�szNullProvider.resource_isdircCs|jo|�|�|j|��Sr)rtr�rmrurrrr��szNullProvider.metadata_isdircCs|�|�|j|��Sr)�_listdirrmrkrsrrrr[�szNullProvider.resource_listdircCs|jr|�|�|j|��SgSr)rtr�rmrurrrr��szNullProvider.metadata_listdirc
Cs�d|}|�|�s$tdjft����|�|��dd�}|�dd�}|�|j|�}||d<tj	�
|�r�t|���}t
||d�}t|||�n>dd	lm}t|�d|�d�|f||<t
||d�}	t|	||�dS)
Nzscripts/z<Script {script!r} not found in metadata at {self.egg_info!r}z
�
�
ri�execr)�cache)r�rkr�r�r�r�rmrtr�r�r�r�read�compiler��	linecacher��lenr)
r�r�r�ZscriptZscript_textZscript_filename�source�coder�Zscript_coderrrrQ�s.
���zNullProvider.run_scriptcCstd��dS�Nz9Can't perform this operation for unregistered loader type��NotImplementedErrorr^rrrrr�s�zNullProvider._hascCstd��dSr�r�r^rrrr��s�zNullProvider._isdircCstd��dSr�r�r^rrrr��s�zNullProvider._listdircCs*|�|�|r&tjj|f|�d���S|S)N�/)�_validate_resource_pathr�r�rIr)r��baser�rrrrm�s
zNullProvider._fncCsptjj|�tj�kp&t�|�p&t�|�}|s0dSd}t�|�rPt�|�sPt|��t	j
|dd�dtdd�dS)aO
        Validate the resource paths according to the docs.
        https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access

        >>> warned = getfixture('recwarn')
        >>> warnings.simplefilter('always')
        >>> vrp = NullProvider._validate_resource_path
        >>> vrp('foo/bar.txt')
        >>> bool(warned)
        False
        >>> vrp('../foo/bar.txt')
        >>> bool(warned)
        True
        >>> warned.clear()
        >>> vrp('/foo/bar.txt')
        >>> bool(warned)
        True
        >>> vrp('foo/../../bar.txt')
        >>> bool(warned)
        True
        >>> warned.clear()
        >>> vrp('foo/f../bar.txt')
        >>> bool(warned)
        False

        Windows path separators are straight-up disallowed.
        >>> vrp(r'\foo/bar.txt')
        Traceback (most recent call last):
        ...
        ValueError: Use of .. or absolute path in a resource path is not allowed.

        >>> vrp(r'C:\foo/bar.txt')
        Traceback (most recent call last):
        ...
        ValueError: Use of .. or absolute path in a resource path is not allowed.

        Blank values are allowed

        >>> vrp('')
        >>> bool(warned)
        False

        Non-string values are not.

        >>> vrp(None)
        Traceback (most recent call last):
        ...
        AttributeError: ...
        Nz=Use of .. or absolute path in a resource path is not allowed.r�z/ and will raise exceptions in a future release.r��
stacklevel)r�r��pardirr�	posixpath�sep�isabs�ntpathrLrUrV�DeprecationWarning)r�ZinvalidrYrrrr��s6���z$NullProvider._validate_resource_pathcCs$t|jd�r|j�|�Std��dS)N�get_dataz=Can't perform this operation for loaders without 'get_data()')r�r�r�r�r^rrrrqs
�zNullProvider._get)rrrr�egg_namertr�r�r�r�r�r�rvr�r�r�r]r�r[r�rQrrr�r�rmr`r�rqrrrrr�gs2
Jc@s eZdZdZdd�Zdd�ZdS)r�z&Provider based on a virtual filesystemcCst�||�|��dSr)r�r��
_setup_prefixrlrrrr�-szEggProvider.__init__cCsZ|j}d}||krVt|�r@tj�|�|_tj�|d�|_||_qV|}tj�	|�\}}q
dS)N�EGG-INFO)
rk�_is_egg_pathr�r��basenamer�rIrt�egg_rootr)r�r��oldr�rrrr�1szEggProvider._setup_prefixN)rrrrr�r�rrrrr�*sc@sDeZdZdZdd�Zdd�Zdd�Zdd	�Zd
d�Ze	dd
��Z
dS)r�z6Provides access to package resources in the filesystemcCstj�|�Sr)r�r�r�r^rrrrrCszDefaultProvider._hascCstj�|�Sr)r�r�r
r^rrrr�FszDefaultProvider._isdircCs
t�|�Sr)r��listdirr^rrrr�IszDefaultProvider._listdircCst|�|j|�d�S�N�rb)rrmrkrnrrrr�Lsz#DefaultProvider.get_resource_streamc
Cs*t|d��}|��W5QR�SQRXdSr�)rr�)r�r��streamrrrrqOszDefaultProvider._getcCs,d}|D]}tt|td��}t||�qdS)N)�SourceFileLoader�SourcelessFileLoader)r��importlib_machinery�typer�)r�Zloader_namesr�Z
loader_clsrrr�	_registerSszDefaultProvider._registerN)rrrrrrr�r�r�rqr r�rrrrr�@sc@s8eZdZdZdZdd�ZZdd�Zdd�Zd	d
�Z	dS)r�z.Provider that returns nothing for all requestsNcCsdS�NFrr^rrrr?cr@zEmptyProvider.<lambda>cCsdS�Nr�rr^rrrrqeszEmptyProvider._getcCsgSrrr^rrrr�hszEmptyProvider._listdircCsdSrrr�rrrr�kszEmptyProvider.__init__)
rrrrrkr�rrrqr�r�rrrrr�^sc@s eZdZdZedd��ZeZdS)�ZipManifestsz
    zip manifest builder
    c
s@t�|��,��fdd����D�}t|�W5QR�SQRXdS)a
        Build a dictionary similar to the zipimport directory
        caches, except instead of tuples, store ZipInfo objects.

        Use a platform-specific path separator (os.sep) for the path keys
        for compatibility with pypy on Windows.
        c3s&|]}|�dtj���|�fVqdS)r�N)r�r�r�Zgetinfo�r�r��Zzfilerrr��s��z%ZipManifests.build.<locals>.<genexpr>N)�zipfileZZipFileZnamelistr')r�r�r+rr�r�buildws
	
�zZipManifests.buildN)rrrrr r��loadrrrrr�rs
r�c@s$eZdZdZe�dd�Zdd�ZdS)�MemoizedZipManifestsz%
    Memoized zipfile manifests.
    �manifest_modzmanifest mtimecCsRtj�|�}t�|�j}||ks.||j|krH|�|�}|�||�||<||jS)zW
        Load a manifest at path or return a suitable manifest already loaded.
        )	r�r��normpathrQ�st_mtime�mtimer�r��manifest)r�r�r�r�rrrr��s
zMemoizedZipManifests.loadN)rrrrr�
namedtupler�r�rrrrr��sr�c@s�eZdZdZdZe�Zdd�Zdd�Zdd�Z	e
d	d
��Zdd�Ze
d
d��Zdd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd �ZdS)!r�z"Resource support for zips and eggsNcCs t�||�|jjtj|_dSr)r�r�r��archiver�r��zip_prerlrrrr��szZipProvider.__init__cCsP|�tj�}||jjkrdS|�|j�r:|t|j�d�Std||jf��dS)Nr��%s is not a subpath of %s)	�rstripr�r�r�r�r�r�r��AssertionError�r��fspathrrr�
_zipinfo_name�s�zZipProvider._zipinfo_namecCsP|j|}|�|jtj�r:|t|j�dd��tj�Std||jf��dS)Nrr�)r�r�r�r�r�r�rr�)r��zip_pathr�rrr�_parts�s
�zZipProvider._partscCs|j�|jj�Sr)�_zip_manifestsr�r�r�r�rrr�zipinfo�szZipProvider.zipinfocCs\|jstd��|�|�}|��}d�|�|��|krP|D]}|�||�|��q8|�||�S)Nz5resource_filename() only supported for .egg, not .zipr�)r�r��_resource_to_zip�_get_eager_resourcesrIr��_extract_resource�
_eager_to_zip)r�r�r�r��eagersr�rrrr��s�
z!ZipProvider.get_resource_filenamecCs"|j}|jd}t�|�}||fS)N)rrr�)�	file_size�	date_time�time�mktime)Zzip_stat�sizer��	timestamprrr�_get_date_and_size�s

zZipProvider._get_date_and_sizec
Csx||��kr@|��|D]}|�|tj�||��}qtj�|�S|�|j|�\}}ts`t	d��z�|�
|j|�|��}|�
||�r�|WStdtj�|�d�\}}	t�||j�|��t�|�t|	||f�|�|	|�zt|	|�Wnhtjk
�rNtj�|��rH|�
||��r |YWStjdk�rHt|�t|	|�|YWS�YnXWn tjk
�rr|��YnX|S)Nz>"os.rename" and "os.unlink" are not supported on this platformz	.$extract)�dirrO)�_indexr�r�r�rIrjr�r��
WRITE_SUPPORT�IOErrorrNr�r��_is_current�_mkstemp�writer�r��closerr]r
�error�isfiler�rrI)
r�r�r�r�Zlastr�r�Z	real_pathZoutfZtmpnamrrrr��sN��
�




zZipProvider._extract_resourcec		Csx|�|j|�\}}tj�|�s$dSt�|�}|j|ksB|j|krFdS|j�	|�}t
|d��}|��}W5QRX||kS)zK
        Return True if the file_path is current for this zip_path
        Fr�)r�r�r�r�r�rQ�st_sizer�r�r�rr�)	r�Z	file_pathr�r�r�rQZzip_contents�fZ
file_contentsrrrr�s
zZipProvider._is_currentcCs>|jdkr8g}dD]}|�|�r|�|�|��q||_|jS)N)znative_libs.txtzeager_resources.txt)r�r�rr�)r�r�r�rrrr�"s

z ZipProvider._get_eager_resourcesc	Cs�z|jWStk
r�i}|jD]V}|�tj�}|r"tj�|dd��}||krh||�|d�q"q2|��g||<q2q"||_|YSXdS)Nr�)	Z	_dirindex�AttributeErrorr�rr�r�rIr�r)r�Zindr��parts�parentrrrr�+s
zZipProvider._indexcCs |�|�}||jkp||��kSr)r�r�r�)r�r�r�rrrrr<s
zZipProvider._hascCs|�|�|��kSr)r�r�r�rrrr�@szZipProvider._isdircCst|���|�|�d��S�Nr)rr�r�r�r�rrrr�CszZipProvider._listdircCs|�|�|j|��Sr)r�rmr�rsrrrr�FszZipProvider._eager_to_zipcCs|�|�|j|��Sr)r�rmrkrsrrrr�IszZipProvider._resource_to_zip)rrrrr�r�r�r�r�r�r�r�r�r`r�r�r�r�r�rrr�r�r�r�rrrrr��s(



7	c@s@eZdZdZdd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Z	dS)r�a*Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    cCs
||_dSr�r�r^rrrr�\szFileMetadata.__init__cCs|jSrr�rurrrrv_szFileMetadata._get_metadata_pathcCs|dkotj�|j�S)N�PKG-INFO)r�r�r�rurrrr�bszFileMetadata.has_metadatac	CsD|dkrtd��tj|jddd��}|��}W5QRX|�|�|S)Nr�z(No metadata except PKG-INFO is availablerxr�)�encoding�errors)r�rorr�r��_warn_on_replacement)r�r�r��metadatarrrr�es
zFileMetadata.get_metadatacCs2d�d�}||kr.d}|jft��}t�|�dS)Ns�rxz2{self.path} could not be properly decoded in UTF-8)rzr�r�rUrV)r�r�Zreplacement_charrGrYrrrr�ns

z!FileMetadata._warn_on_replacementcCst|�|��Srrrurrrr�vszFileMetadata.get_metadata_linesN)
rrrrr�rvr�r�r�r�rrrrr�Ps	c@seZdZdZdd�ZdS)r�asMetadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir, project_name=dist_name, metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    cCs||_||_dSr)rkrt)r�r�rtrrrr��szPathMetadata.__init__N�rrrrr�rrrrr�zsc@seZdZdZdd�ZdS)r�z Metadata provider for .egg filescCsD|jtj|_||_|jr0tj�|j|j�|_n|j|_|�	�dS)z-Create a metadata provider from a zipimporterN)
r�r�r�r�r��prefixr�rIrkr�)r��importerrrrr��szEggMetadata.__init__Nr�rrrrr��sr'��_distribution_finderscCs|t|<dS)axRegister `distribution_finder` to find distributions in sys.path items

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item.  See ``pkg_resources.find_on_path`` for an example.Nr�)�
importer_typeZdistribution_finderrrrr��scCst|�}tt|�}||||�S)z.Yield distributions accessible via `path_item`)rr�r�)�	path_item�onlyr��finderrrrra�s
c	cs�|j�d�rdSt|�}|�d�r2tj||d�V|r:dS|�d�D]|}t|�r�tj	�
||�}tt�
|�|�}|D]
}|VqrqD|���d�rDtj	�
||�}tt�
|��}||_t�|||�VqDdS)z@
    Find eggs in zip files; possibly multiple nested eggs.
    z.whlNr��r�r��
.dist-info)r��endswithr�r�rh�
from_filenamer[r�r�r�rI�find_eggs_in_zip�	zipimport�zipimporterr.rt�
from_location)	r�r�r�r�Zsubitem�subpathr�r�Zsubmetarrrr��s$

r�cCsdSr�r)r�r�r�rrr�find_nothing�sr�cCsdd�}t||dd�S)aL
    Given a list of filenames, return them in descending order
    by version number.

    >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
    >>> _by_version_descending(names)
    ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
    >>> _by_version_descending(names)
    ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
    >>> _by_version_descending(names)
    ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
    cSs2tj�|�\}}t�|�d�|g�}dd�|D�S)z6
        Parse each component of the filename
        rbcSsg|]}tj�|��qSr)rrr�)r��partrrr�
<listcomp>�sz?_by_version_descending.<locals>._by_version.<locals>.<listcomp>)r�r��splitext�	itertools�chainr)r��extr�rrr�_by_version�sz+_by_version_descending.<locals>._by_versionTr1)�sorted)rMrrrr�_by_version_descending�src
#s�t���t��r4tj�t�tj��d��d�VdSt��}��fdd�|D�}t	|�}|D]2}tj��|�}t
�|��}||�D]
}	|	Vq�q\dS)z6Yield distributions accessible on a sys.path directoryr�r�Nc3s|]}t�|��r|VqdSr)�dist_factory)r�r��r�r�rrr�s�zfind_on_path.<locals>.<genexpr>)�_normalize_cached�_is_unpacked_eggrhr�r�r�r�rI�safe_listdirrr)
r�r�r�r�ZfilteredZpath_item_entriesr��fullpath�factoryr�rrr�find_on_path�s(���rcCsH|��}tt|jd��}|r tS|s0t|�r0tS|sB|�d�rBtSt�S)z9
    Return a dist_factory for a path_item and entry
    )�	.egg-infor�z	.egg-link)	r.r&rr��distributions_from_metadatar�ra�resolve_egg_link�NoDists)r�r�r�r.Zis_metarrrrs������rc@s*eZdZdZdd�ZejreZdd�ZdS)rzS
    >>> bool(NoDists())
    False

    >>> list(NoDists()('anything'))
    []
    cCsdSr�rr�rrr�__bool__.szNoDists.__bool__cCstd�Sr�)�iter)r�r	rrr�__call__3szNoDists.__call__N)	rrrrrrryZ__nonzero__rrrrrr&s
rc
Csvzt�|�WSttfk
r$YnNtk
rp}z0|jtjtjtjfkpXt	|dd�dk}|s`�W5d}~XYnXdS)zI
    Attempt to list contents of path, but suppress some exceptions.
    ZwinerrorNir)
r�r��PermissionError�NotADirectoryError�OSError�errnoZENOTDIRZEACCESZENOENTr�)r�rhZ	ignorablerrrr7s�rccsftj�|�}tj�|�r:tt�|��dkr.dSt||�}nt|�}tj�|�}t	j
|||td�VdS)Nr)�
precedence)r�r�rjr
r�r�r�r�r�rhr�r�)r��rootr�r�rrrr
Ls�r
c	cs4t|��"}|D]}|��}|r|VqW5QRXdS)z1
    Yield non-empty lines from file at path
    N)r�strip)r�r��linerrr�non_empty_lines[s

rcs.t��}�fdd�|D�}tt|�}t|d�S)za
    Given a path to an .egg-link, resolve distributions
    present in the referenced path.
    c3s$|]}tj�tj���|�VqdSr)r�r�rIrj)r��refr�rrr�ls�z#resolve_egg_link.<locals>.<genexpr>r)rrra�next)r�Zreferenced_pathsZresolved_pathsZdist_groupsrr�rrfs
�
r�
FileFinder��_namespace_handlers)�_namespace_packagescCs|t|<dS)a�Register `namespace_handler` to declare namespace packages

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::

        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages

    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath.  For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    Nr)r�Znamespace_handlerrrrr�}sc	Cs�t|�}|dkrdSt���t�d�|�|�}W5QRX|dkrHdStj�|�}|dkr�t�	|�}tj|<g|_
t|�nt|d�s�t
d|��tt|�}|||||�}|dk	r�|j
}|�|�|�|�t|||�|S)zEEnsure that named package includes a subpath of path_item (if needed)N�ignore�__path__�Not a package:)rrU�catch_warnings�simplefilter�find_modulerGr�r��types�
ModuleTyper#�_set_parent_nsr�r�r�r r��load_module�_rebuild_mod_path)�packageNamer�r�r�r�Zhandlerr�r�rrr�
_handle_ns�s.







r.csjdd�tjD���fdd����fdd�}t||d�}dd�|D�}t|jt�r`||jd	d	�<n||_d	S)
zq
    Rebuild module.__path__ ensuring that all entries are ordered
    corresponding to their sys.path order
    cSsg|]}t|��qSr�r�r��prrrr��sz%_rebuild_mod_path.<locals>.<listcomp>cs.z��|�WStk
r(td�YSXdS)z/
        Workaround for #520 and #513.
        �infN)�indexrL�float)r�)�sys_pathrr�safe_sys_path_index�sz._rebuild_mod_path.<locals>.safe_sys_path_indexcs<|�tj�}��d�d}|d|�}�ttj�|���S)zR
        Return the ordinal of the path based on its position in sys.path
        rBrN)rr�r��countrrI)r��
path_partsZmodule_partsr�)�package_namer6rr�position_in_sys_path�sz/_rebuild_mod_path.<locals>.position_in_sys_path)r8cSsg|]}t|��qSrr/r0rrrr��sN)rGr�rr�r#r)�	orig_pathr9r�r:�new_pathr)r9r6r5rr,�s		r,cCs�t��z�|tkrW��dStj}|�d�\}}}|r|t|�|tkrLt|�ztj	|j
}Wntk
rztd|��YnXt�
|p�dg��|�t�
|g�|D]}t||�q�W5t��XdS)z9Declare that package 'packageName' is a namespace packageNrBr$)�_imp�acquire_lock�release_lockr!rGr��
rpartitionr^r�r�r#r�r�r�r�r.)r-r�r�r�r�rrrr^�s&cCsFt��z.t�|d�D]}t||�}|rt||�qW5t��XdS)zDEnsure that previously-declared namespace packages include path_itemrN)r=r>r?r!r�r.r�)r�r��packager�rrrr��s
cCsDtj�||�d�d�}t|�}|jD]}t|�|kr&q@q&|SdS)zBCompute an ns-package subpath for a filesystem or zipfile importerrBr�N)r�r�rIrrr#)r�r�r-r�r�Z
normalizedrrrr�file_ns_handler�s
rBcCsdSrr)r�r�r-r�rrr�null_ns_handler	srCcCs tj�tj�tj�t|����S)z1Normalize a file/dir name for comparison purposes)r�r��normcase�realpathr��
_cygwin_patch�r\rrrr|	scCstjdkrtj�|�S|S)a
    Contrary to POSIX 2008, on Cygwin, getcwd (3) contains
    symlink components. Using
    os.path.abspath() works around this limitation. A fix in os.getcwd()
    would probably better, in Cygwin even more so, except
    that this seems to be by design...
    �cygwin)rGrHr�r��abspathrGrrrrF	srFcCs8z
||WStk
r2t|�||<}|YSXdSr)r�r|)r\r��resultrrrr)	s

rcCs|���d�S)z7
    Determine if given path appears to be an egg.
    �.egg)r.r�r�rrrr�1	sr�cCs t|�otj�tj�|dd��S)z@
    Determine if given path appears to be an unpacked egg.
    r�r�)r�r�r�r�rIr�rrrr8	s�rcCs<|�d�}|��}|r8d�|�}ttj||tj|�dS)NrB)rrrI�setattrrGr�)r-r�r�r�rrrr*B	s


r*ccsZt|tj�r8|��D] }|��}|r|�d�s|Vqn|D]}t|�D]
}|VqHq<dS)z9Yield non-empty/non-comment lines of a string or sequence�#N)r�rr��
splitlinesrr�ru)�strs�sZssrrrruJ	s
z\w+(\.\w+)*$z�
    (?P<name>[^-]+) (
        -(?P<ver>[^-]+) (
            -py(?P<pyver>[^-]+) (
                -(?P<plat>.+)
            )?
        )?
    )?
    c@s�eZdZdZddd�Zdd�Zdd	�Zddd�Zd
d�Zddd�Z	e
�d�Ze
ddd��Ze
dd��Ze
ddd��Ze
ddd��ZdS) rjz3Object representing an advertised importable objectrNcCs<t|�std|��||_||_t|�|_t|�|_||_dS)NzInvalid module name)�MODULErLr��module_name�tuple�attrsrr�)r�r�rRrTrr�rrrr�j	s


zEntryPoint.__init__cCsHd|j|jf}|jr*|dd�|j�7}|jrD|dd�|j�7}|S)Nz%s = %s�:rBz [%s]�,)r�rRrTrIr)r�rPrrrr�s	szEntryPoint.__str__cCsdt|�S)NzEntryPoint.parse(%r)�r�r�rrrr�{	szEntryPoint.__repr__TcOs4|r|s|rtjdtdd�|r,|j||�|��S)zH
        Require packages for this EntryPoint, then resolve it.
        zJParameters to load are deprecated.  Call .resolve and .require separately.rCr�)rUrVr�rPr�)r�rPr>�kwargsrrrr�~	s�zEntryPoint.loadc
CsXt|jdgdd�}zt�t|j|�WStk
rR}ztt|���W5d}~XYnXdS)zD
        Resolve the entry point from its module and attrs.
        rr)�fromlist�levelN)	r�rR�	functools�reducer�rTr�r�r�)r�r�r~rrrr��	s
zEntryPoint.resolvecCsL|jr|jstd|��|j�|j�}tj||||jd�}tttj|��dS)Nz&Can't require() without a distribution)r)	rr�rnrr_r�rrr�)r�rrr�r+rrrrP�	s

zEntryPoint.requirez]\s*(?P<name>.+?)\s*=\s*(?P<module>[\w.]+)\s*(:\s*(?P<attr>[\w.]+))?\s*(?P<extras>\[.*\])?\s*$cCsf|j�|�}|sd}t||��|��}|�|d�}|drJ|d�d�nd}||d|d|||�S)aParse a single entry point from string `src`

        Entry point syntax follows the form::

            name = some.module:some.attr [extra1, extra2]

        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        z9EntryPoint must be in 'name=module:attrs [extras]' formatr�attrrBrr�r�)�patternrFrL�	groupdict�
_parse_extrasr)r��srcr�rNrY�resrrTrrrr��	s
zEntryPoint.parsecCs(|sdSt�d|�}|jr"t��|jS)Nr�x)rir��specsrLr)r�Zextras_specr�rrrr`�	szEntryPoint._parse_extrascCsVt|�std|��i}t|�D]2}|�||�}|j|krFtd||j��|||j<q|S)zParse an entry point groupzInvalid group namezDuplicate entry point)rQrLrur�r�)r�rK�linesr��thisr�eprrr�parse_group�	s

zEntryPoint.parse_groupcCstt|t�r|��}nt|�}i}|D]J\}}|dkrB|s:q$td��|��}||kr\td|��|�|||�||<q$|S)z!Parse a map of entry point groupsNz%Entry points must be listed in groupszDuplicate group name)r�r'r+rvrLrrh)r��datar��mapsrKrerrr�	parse_map�	s


zEntryPoint.parse_map)rrN)T)NN)N)N)N)rrrrr�r�r�r�r�rPrcr�r^r r�r`rhrkrrrrrjg	s$
	



�	
cCs>|sdStj�|�}|d�d�r:tj�|dd�d�S|S)Nr�r�zmd5=)r�)rr�Zurlparser�Z
urlunparse)rZparsedrrr�_remove_md5_fragment�	srlcCs@dd�}t||�}tt|�d�}|�d�\}}}t|���p>dS)z�
    Given an iterable of lines from a Metadata file, return
    the value of the Version field, if present, or None otherwise.
    cSs|���d�S)Nzversion:)r.r�)rrrr�is_version_line�	sz+_version_from_file.<locals>.is_version_liner�rUN)rrr�	partitionrrr)rermZ
version_linesrr�r}rrr�_version_from_file�	s

rocs�eZdZdZdZddddedefdd�ZedSdd��Z	dd	�Z
ed
d��Zdd
�Z
dd�Zdd�Zdd�Zdd�Zdd�Zdd�Zedd��Zedd��Zdd�Zed d!��Zed"d#��Zed$d%��Zd&d'�ZdTd)d*�Zd+d,�Zd-d.�Zd/d0�ZdUd2d3�Z d4d5�Z!d6d7�Z"d8d9�Z#d:d;�Z$�fd<d=�Z%e&e'd>��s4[%edVd?d@��Z(dAdB�Z)dCdD�Z*dWdEdF�Z+dGdH�Z,dXdIdJ�Z-dKdL�Z.dMdN�Z/dOdP�Z0edQdR��Z1�Z2S)Yrhz5Wrap an actual or potential sys.path entry w/metadatar�NcCsFt|pd�|_|dk	r t|�|_||_||_||_||_|p>t|_	dS)NZUnknown)
rqrrr�_versionr+rHrrr��	_provider)r�rr�rrr+rHrrrrr�
s
zDistribution.__init__cKs~dgd\}}}}tj�|�\}}	|	��tkr^t|	��}t|�}
|
r^|
�dddd�\}}}}|||f||||d�|����S)Nrr�ZverZpyverrM)rrr+rH)r�r�r�r.�_distributionImpl�EGG_NAMErK�_reload_version)r�rr�r�r)rrr+rHrrFrrrr�
s.����zDistribution.from_locationcCs|Srrr�rrrrt#
szDistribution._reload_versioncCs(|j|j|jt|j�|jpd|jp$dfSr�)�parsed_versionrr8rlrr+rHr�rrrr0&
s�zDistribution.hashcmpcCs
t|j�Sr)�hashr0r�rrr�__hash__1
szDistribution.__hash__cCs|j|jkSr�r0�r�r8rrr�__lt__4
szDistribution.__lt__cCs|j|jkSrrxryrrr�__le__7
szDistribution.__le__cCs|j|jkSrrxryrrr�__gt__:
szDistribution.__gt__cCs|j|jkSrrxryrrr�__ge__=
szDistribution.__ge__cCst||j�sdS|j|jkSr�)r�r�r0ryrrr�__eq__@
szDistribution.__eq__cCs
||kSrrryrrr�__ne__F
szDistribution.__ne__cCs6z|jWStk
r0|j��|_}|YSXdSr)Z_keyr�rr.r7rrrr8M
s
zDistribution.keycCst|d�st|j�|_|jS)N�_parsed_version)r�r#rr�r�rrrruU
s
zDistribution.parsed_versioncCsXtjj}t|j|�}|sdS|js&dSt�d����dd�}t	�
|jft|��t
�dS)Na>
            '{project_name} ({version})' is being parsed as a legacy,
            non PEP 440,
            version. You may find odd behavior and sort order.
            In particular it will be sorted as less than 0.0. It
            is recommended to migrate to PEP 440 compatible
            versions.
            r�r�)rrr!r�r�rBrCrr�rUrVr��varsr)r�ZLVZ	is_legacyrGrrr�_warn_legacy_version\
s�	z!Distribution._warn_legacy_versioncCsZz|jWStk
rT|��}|dkrL|�|j�}d�|j|�}t||��|YSXdS)Nz4Missing 'Version:' header and/or {} file at path: {})rpr��_get_version�_get_metadata_path_for_display�PKG_INFOr�rL)r�rr�rYrrrrv
s��
zDistribution.versioncCs4z|jWStk
r,|�|���|_YnX|jS)z~
        A map of extra to its list of (direct) requirements
        for this distribution, including the null extra.
        )Z_Distribution__dep_mapr��_filter_extras�_build_dep_mapr�rrr�_dep_map�
s
zDistribution._dep_mapcCsrttd|��D]^}|}|�|�}|�d�\}}}|oDt|�pDt|�}|rNg}t|�pXd}|�|g��|�q|S)z�
        Given a mapping of extras to dependencies, strip off
        environment markers and filter out any dependencies
        not matching the markers.
        NrU)	rrrrnryrzrwr�r)�dmr!Z	new_extrar�r�r#Zfails_markerrrrr��
s
�zDistribution._filter_extrascCs@i}dD]2}t|�|��D]\}}|�|g��t|��qq|S)N)zrequires.txtzdepends.txt)rv�
_get_metadatar�rrp)r�r�r�r!r�rrrr��
s
zDistribution._build_dep_maprc	Csf|j}g}|�|�dd��|D]@}z|�|t|��Wq tk
r^td||f��Yq Xq |S)z@List of Requirements needed for this distro if `extras` are usedNrz%s has no such extra feature %r)r�rr�rwr�rn)r�rr�Zdepsrrrrr�
s
�zDistribution.requirescCs,z|j�|�}Wntk
r&YdSX|S)zK
        Return the path to the given metadata file, if available.
        z[could not detect])rqrvrKrwrrrr��
s
z+Distribution._get_metadata_path_for_displayccs$|�|�r |�|�D]
}|VqdSr)r�r�)r�r�rrrrr��
s
zDistribution._get_metadatacCs|�|j�}t|�}|Sr)r�r�ro)r�rerrrrr��
szDistribution._get_versionFcCsV|dkrtj}|j||d�|tjkrRt|j�|�d�D]}|tjkr:t|�q:dS)z>Ensure distribution is importable on `path` (default=sys.path)Nr�namespace_packages.txt)rGr�rr�rr�r�r^)r�r�r�Zpkgrrr�activate�
s


zDistribution.activatecCs8dt|j�t|j�|jptf}|jr4|d|j7}|S)z@Return what this distribution's standard .egg filename should bez
%s-%s-py%srb)rxrrr+r=rH)r�r\rrrr��
s�zDistribution.egg_namecCs |jrd||jfSt|�SdS)Nz%s (%s))rr�r�rrrr��
szDistribution.__repr__cCs@zt|dd�}Wntk
r(d}YnX|p0d}d|j|fS)Nrz[unknown version]z%s %s)r�rLr)r�rrrrr��
s
zDistribution.__str__cCs|�d�rt|��t|j|�S)zADelegate all unrecognized public attributes to .metadata providerr�)r�r�r�rq)r�r]rrr�__getattr__�
s
zDistribution.__getattr__cs.tttt|����tdd�|j��D��B�S)Ncss|]}|�d�s|VqdS�r�N)r�)r�r]rrrr�s
�z'Distribution.__dir__.<locals>.<genexpr>)rr�superrh�__dir__rqr�r:rrr�s���zDistribution.__dir__r�cKs|jt|�tj�|�|f|�Sr)r�rr�r�r�)r�r\r�r)rrrr�s
��zDistribution.from_filenamecCs<t|jtjj�r"d|j|jf}nd|j|jf}t�|�S)z?Return a ``Requirement`` that matches this distribution exactlyz%s==%sz%s===%s)r�rurrrrrir�)r��specrrrrszDistribution.as_requirementcCs.|�||�}|dkr&td||ff��|��S)z=Return the `name` entry point of `group` or raise ImportErrorNzEntry point %r not found)rVr�r�)r�rKr�rgrrrrTszDistribution.load_entry_pointcCsPz
|j}Wn,tk
r6t�|�d�|�}|_YnX|dk	rL|�|i�S|S)r�zentry_points.txtN)Z_ep_mapr�rjrkr�r�)r�rKZep_maprrrrU&s
�zDistribution.get_entry_mapcCs|�|��|�Sr�)rUr�rrrrrV2szDistribution.get_entry_infoc
Cs4|p|j}|sdSt|�}tj�|�}dd�|D�}t|�D]|\}}||kr^|rVq�q�dSq<||kr<|jtkr<|s�|||d�kr�dS|tjkr�|�	�|�
||�|�
||�q�q<|tjkr�|�	�|r�|�
d|�n
|�|�dSz|�||d�}	Wnt
k
�rY�q0Yq�X||	=||	=|	}q�dS)a�Ensure self.location is on path

        If replace=False (default):
            - If location is already in path anywhere, do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent.
              - Else: add to the end of path.
        If replace=True:
            - If location is already on path anywhere (not eggs)
              or higher priority than its parent (eggs)
              do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent,
                removing any lower-priority entries.
              - Else: add it to the front of path.
        NcSsg|]}|rt|�p|�qSrr/r0rrrr�Psz*Distribution.insert_on.<locals>.<listcomp>rr)rrr�r�rj�	enumeraterr}rG�check_version_conflictr	r�r3rL)
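# Hedged sketch of the rules above.  The egg path is made up purely for
# illustration; insert_on() works on a plain list without touching the
# filesystem or sys.path.
from pkg_resources import Distribution

dist = Distribution('/tmp/Example-1.0-py3.8.egg',
                    project_name='Example', version='1.0')
path = ['/usr/lib/python3.8/site-packages']
dist.insert_on(path)                # parent dir not on path -> appended at the end
dist.insert_on(path, replace=True)  # already present -> left untouched
print(path)  # ['/usr/lib/python3.8/site-packages', '/tmp/Example-1.0-py3.8.egg']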
r�r��locr�ZnlocZbdirZnpathr1rZnprrrr6s@



zDistribution.insert_oncCs�|jdkrdSt�|�d��}t|j�}|�d�D]p}|tjks2||ks2|tkrRq2|dkr\q2t	tj|dd�}|r�t|��
|�s2|�
|j�r�q2td|||jf�q2dS)N�
setuptoolsr�z
top_level.txt)Z
pkg_resourcesr�ZsiterizIModule %s was already imported from %s, but %s is being added to sys.path)r8r'r(r�r|rrGr�r!r�r��
issue_warning)r�Znspr��modname�fnrrrr�zs*

�
�
��z#Distribution.check_version_conflictcCs6z
|jWn&tk
r0tdt|��YdSXdS)NzUnbuilt egg for FT)rrLr�r�r�rrrr3�s
zDistribution.has_versioncKs@d}|��D]}|�|t||d��q|�d|j�|jf|�S)z@Copy this distribution, substituting in any changed keyword argsz<project_name version py_version platform location precedenceNr�)rr�r�rqr�)r�r)rMr]rrr�clone�s
zDistribution.clonecCsdd�|jD�S)NcSsg|]}|r|�qSrr)r�Zdeprrrr��sz'Distribution.extras.<locals>.<listcomp>)r�r�rrrr�szDistribution.extras)N)r)NF)N)N)NF)3rrrrr�r=r}r�r r�rtr�r0rwrzr{r|r}r~rr8rur�rr�r`r�r�rr�r�r�r�r�r�r�r�r�r��objectr�rrTrUrVrr�r3r�r�
__classcell__rrr:rrh
st�










		

Dc@seZdZdd�ZdS)�EggInfoDistributioncCs|��}|r||_|S)a�
        Packages installed by distutils (e.g. numpy or scipy), which uses
        an old safe_version, can have their version numbers mangled when
        converted to filenames (e.g., 1.11.0.dev0+2329eae to
        1.11.0.dev0_2329eae). Such distributions will not be parsed
        properly downstream by Distribution and safe_version, so take an
        extra step and try to get the version number from the metadata
        file itself instead of the filename.
        )r�rp)r�Z
md_versionrrrrt�sz#EggInfoDistribution._reload_versionN)rrrrtrrrrr��sr�c@s>eZdZdZdZe�d�Zedd��Z	edd��Z
dd	�Zd
S)�DistInfoDistributionzV
    Wrap an actual or potential sys.path entry
    with metadata, .dist-info style.
    ZMETADATAz([\(,])\s*(\d.*?)\s*([,\)])cCsFz|jWStk
r@|�|j�}tj���|�|_|jYSXdS)zParse and cache metadataN)Z	_pkg_infor�r�r��email�parserZParserZparsestr)r�r�rrr�_parsed_pkg_info�sz%DistInfoDistribution._parsed_pkg_infocCs2z|jWStk
r,|��|_|jYSXdSr)�_DistInfoDistribution__dep_mapr��_compute_dependenciesr�rrrr��s

zDistInfoDistribution._dep_mapcs�dgi}|_g�|j�d�p gD]}��t|��q"�fdd�}t|d��}|d�|�|j�d�pjgD](}t|���}tt||��|�||<ql|S)z+Recompute this distribution's dependencies.Nz
Requires-Distc3s*�D] }|jr|j�d|i�r|VqdS)Nr!r")r!r��r�rr�reqs_for_extra�szBDistInfoDistribution._compute_dependencies.<locals>.reqs_for_extrazProvides-Extra)	r�r�Zget_allrrp�	frozensetrwrr)r�r�r�r��commonr!Zs_extrarr�rr��sz*DistInfoDistribution._compute_dependenciesN)rrrrr�rcr�ZEQEQr�r�r�r�rrrrr��s

	
r�)rKrr�cOsZd}t�}zt�|�j|kr&|d7}qWntk
r<YnXtj|d|di|��dS)Nrr�)r$rGr�r�rLrUrV)r>r)rZr.rrrr��sr�c@seZdZdd�ZdS)�RequirementParseErrorcCsd�|j�S)Nr�)rIr>r�rrrr��szRequirementParseError.__str__N)rrrr�rrrrr��sr�c	cs�tt|��}|D]l}d|kr.|d|�d��}|�d�rr|dd���}z|t|�7}Wntk
rpYdSXt|�VqdS)z�Yield ``Requirement`` objects for each specification in `strs`

    `strs` must be a string, or a (possibly-nested) iterable thereof.
    z #N�\���)rrur�r�rr�
StopIterationri)rOrerrrrrps
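# Hedged usage sketch of the generator above: nested iterables are flattened
# and anything after " #" on a line is treated as a comment.
from pkg_resources import parse_requirements

reqs = list(parse_requirements([
    'requests>=2.0 # trailing comment is stripped',
    ['packaging[docs]==21.3'],
]))
for req in reqs:
    print(req.project_name, req.specs, req.extras)
# requests [('>=', '2.0')] ()
# packaging [('==', '21.3')] ('docs',)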

csPeZdZ�fdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Ze	d
d��Z
�ZS)ric
s�ztt|��|�Wn2tjjk
rF}ztt|���W5d}~XYnX|j|_	t
|j�}||��|_|_
dd�|jD�|_ttt|j��|_|j
|j|jt|j�|jr�t|j�ndf|_t|j�|_dS)z>DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!NcSsg|]}|j|jf�qSr)r4r)r�r�rrrr�#sz(Requirement.__init__.<locals>.<listcomp>)r�rir�rrZInvalidRequirementr�r�r�Zunsafe_namerqr.rr8�	specifierrdrSrrwrZurlr�r#�hashCmprv�_Requirement__hash)r�Zrequirement_stringrhrr:rrr�s$
��zRequirement.__init__cCst|t�o|j|jkSr)r�rir�ryrrrr~/s

�zRequirement.__eq__cCs
||kSrrryrrrr5szRequirement.__ne__cCs0t|t�r |j|jkrdS|j}|jj|dd�S)NFT)Zprereleases)r�rhr8rr��contains)r�rrrrr�8s

zRequirement.__contains__cCs|jSr)r�r�rrrrwDszRequirement.__hash__cCsdt|�S)NzRequirement.parse(%r)rWr�rrrr�GszRequirement.__repr__cCst|�\}|Sr)rp)rPr�rrrr�Js
zRequirement.parse)rrrr�r~rr�rwr�r`r�r�rrr:rriscCst|kr|tfS|S)zJ
    Ensure object appears in the mro even
    for old-style classes.
    )r�)�classesrrr�_always_objectPs
r�cCs<tt�t|dt|����}|D]}||kr||SqdS)z2Return an adapter factory for `ob` from `registry`r�N)r��inspectZgetmror�r�)�registryr9r(�trrrr�Zsr�cCstj�|�}tj|dd�dS)z1Ensure that the parent directory of `path` existsT)�exist_okN)r�r�rjr�makedirs)r�rjrrrr{bscCsXtstd��t|�\}}|rT|rTt|�sTt|�zt|d�Wntk
rRYnXdS)z/Sandbox-bypassing version of ensure_directory()z*"os.mkdir" not supported on this platform.i�N)r�r�rr
rJr	�FileExistsError)r�rjr\rrrrJhsrJccsvd}g}t|�D]V}|�d�r\|�d�rP|s0|r:||fV|dd���}g}qftd|��q|�|�q||fVdS)asSplit a string or iterable thereof into (section, content) pairs

    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines.  If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
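# Hedged sketch of split_sections() on an entry_points.txt-style string.
from pkg_resources import split_sections

text = """
stray line before any header
[console_scripts]
mytool = mypkg.cli:main
[gui_scripts]
"""
for section, content in split_sections(text):
    print(section, content)
# None ['stray line before any header']
# console_scripts ['mytool = mypkg.cli:main']
# gui_scripts []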
    N�[�]rr�zInvalid section heading)rur�r�rrLr�)rPZsectionZcontentrrrrrvus


cOs*tj}ztt_tj||�W�S|t_XdSr)r�r�os_open�tempfileZmkstemp)r>r)Zold_openrrrr��s
r�r")�categoryr�cOs|||�|Srr)r�r>rXrrr�_call_aside�s
r�cs.t���|d<|��fdd�t��D��dS)z=Set up global resource manager (deliberately not state-saved)Z_managerc3s&|]}|�d�s|t�|�fVqdSr�)r�r�r��r�rrr��s
�z_initialize.<locals>.<genexpr>N)rgr%r�)r.rr�r�_initialize�s
�r�cCs|t��}td|d�|j}|j}|j}|j}|}tdd�|D��|dd�dd�g|_t	t
|jtj
��t��t��d	S)
aE
    Prepare the master working set and make the ``require()``
    API available.

    This function has explicit effects on the global state
    of pkg_resources. It is intended to be invoked once at
    the initialization of this module.

    Invocation by other packages is unsupported and done
    at their own risk.
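# Hedged sketch: once the master working set has been initialized, the
# module-level require() resolves a requirement string against it and
# activates the matching distributions (setuptools is assumed installed).
import pkg_resources

dists = pkg_resources.require('setuptools')
print(dists[0].project_name, dists[0].version)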
    r�)r_css|]}|jdd�VqdS)FrN�r�)r�r�rrrr��s�z1_initialize_master_working_set.<locals>.<genexpr>cSs|jdd�S)NTrr�r�rrrr?�r@z0_initialize_master_working_set.<locals>.<lambda>F)rN)rfr�r*rPrWrrQrSr�rrr�rGr�r$r%r�)r_rPrWr`rQr�rrr�_initialize_master_working_set�s"
��r�c@seZdZdZdS)r�z�
    Base class for warning about deprecations in ``pkg_resources``

    This class is not derived from ``DeprecationWarning``, and as such is
    visible by default.
    Nrrrrrr��s)N)N)F)F)F)F)N)�rZ
__future__rrGr�ror�rcr(r�r�rUrQr[Zpkgutilr4rHrr�Zemail.parserr�rr�rBr�r�r�r�rr=r�Zimpr��	NameErrorrZpkg_resources.externrZpkg_resources.extern.six.movesrrrrr	r
rr�rr�Zos.pathr
rZimportlib.machinery�	machineryr�rr�rrrr�r�Z
__metaclass__�version_info�RuntimeErrorryrrrPr_r`Zresources_streamrcZresource_dirrYrbr]rXrWr[rZr\r�r r!�RuntimeWarningrr#r&r*r0r1r5r:r;r<Z
_sget_noneZ
_sset_nonerO�__all__rKrkrlr�rmrnr�r�r=r}r~rr�r�r�rRrJr�rDr�rEr�rsrtrQr�rSrTrUrVr�r�rfr'r
rer�rorgrdrqrrrwrxryrzr�r�r�r�r�r�r�r�r�r�r�r�r�r�r�rar�r�rrrrrr
rrZImpImporterr�rr�r.r,r^r�rBrCr|rFrr�rr*rurFrQ�VERBOSE�
IGNORECASErsrjrlrorhr�r�rrr�rLr�rprrir�r�r{rJrvr��filterwarningsr�r$r�r��Warningr�rrrr�<module>sZ



�2 




.
5	A
-*

 ""


	
�	
'3�
7


&PK�V[\0�[��#__pycache__/__init__.cpython-38.pycnu�[���U

�Qab=��G@s�dZddlmZddlZddlZddlZddlZddlZddlZddl	Z	ddl
Z
ddlZddlZddl
Z
ddlZddlZddlZddlZddlZddlZddlZddlZddlZddlZddlZddlZddlZddlmZzddlZWnek
�rddlZYnXze Wne!k
�r*e"Z YnXddl#m$Z$ddl%m&Z&m'Z'm(Z(ddlm)Z)zddlm*Z*m+Z+m,Z,d	Z-Wnek
�r�d
Z-YnXddlm.Z/ddl0m1Z1m2Z2zddl3m4Z5e5j6Wnek
�r�dZ5YnXd
dl7m8Z8ddl#m9Z9ddl#m:Z:e;d�e;d�e;d�e;d�e<Z=dej>k�r@dk�rLnne?d��e$j@�r\dZAdZBdZCdZDdZEdZFdZGdZHdZIdZJdZKdZLdZMdZNdZOdZPdZQdZRdZSGdd�deT�ZUdd�ZViZWdd�ZXdd�ZYd d!�ZZd"d#�Z[d$d%�Z\d&d'�Z]d(d)�Z^d*d+�Z_Z`d,d-�Zad.d/d0d1d2d3d4d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDdEdFdGdHdIdJdKdLdMddNddOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcdddedfdgdhdidjdkdldmdndodpdqdrgGZbGdsdI�dIec�ZdGdtdJ�dJed�ZeGdudv�dvee�ZfGdwdK�dKed�ZgGdxdL�dLed�ZhiZidyjjej>�ZkdzZld{Zmd
ZndZod|Zpd}dm�Zqd~d0�Zrgfdd��Zsd�d��Ztd�d��Zue�vd��Zwe�vd��ZxeuZyd�dR�Zzd�d/�Z{e{Z|d�d1�Z}d�d2�Z~�dd�d3�Zd�d4�Z�Gd�d`�d`�Z�Gd�da�dae��Z�Gd�dD�dD�Z�Gd�d��d�e��Z�Gd�dC�dC�Z�e�Z�Gd�dM�dMe?�Z�Gd�dE�dE�Z�d�dB�Z�d�dO�Z�d�dP�Z�d�dU�Z�d�dV�Z�d�dW�Z��dd�dX�Z�Gd�dg�dg�Z�eqe�e��Gd�dh�dhe��Z�Gd�di�die��Z�e����Gd�de�dee��Z�e��Z�Gd�d��d�e��Z�Gd�d��d�e��Z�Gd�dj�dje��Z�eqe
j�e��Gd�db�dbe��Z�Gd�dc�dce��Z�Gd�dd�dde��Z�eXd�id��d�dk�Z��dd�d?�Z��dd�d��Z�e�e
j�e���dd�d��Z�e�e�e��d�d��Z��dd�d��Z�d�d��Z�Gd�d��d��Z�d�d��Z�d�d��Z�d�d��Z�d�d��Z�e�ej�e��e�e5d���r�e�e5j�e��eXd�id��eXd�idd�dl�Z�d�dńZ�d�dDŽZ�d�d<�Z��dd�dn�Z�d�d˄Z�e�ej�e��e�e
j�e��e�e5d���r,e�e5j�e��d�d̈́Z�e�e�e��d�dZ�Z�d�dЄZ�ifd�d҄Z�d�dԄZ�d�dքZ�d�d؄Z�d�dS�Z�e�vdڡj�Z�e�vd�ej�ej�B�j�Z�Gd�dH�dH�Z�d�dބZ�d�d�Z�Gd�dF�dF�Z�Gd�d�d�eăZ�Gd�d�d�eăZ�e�e�e�d�Z�d�d�Z�Gd�d�d�eɃZ�d�dN�Z�Gd�dG�dGe:j�j̓Z�d�d�Z�d�d�Z�d�dY�Z�d�d�Z�d�dT�Z�d�d��Z�ej�d�eUd	d��d�d��Z�e�eփfd�d���Z�e�d�d���Z�Gd�dp�dpeكZ�dS(aZ
Package resource API
--------------------

A resource is a logical file contained within a package, or a logical
subdirectory thereof.  The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is.  Do not use os.path operations to manipulate resource
names being passed into the API.

The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files.  It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
�)�absolute_importN)�get_importer)�six)�urllib�map�filter)�utime)�mkdir�rename�unlinkTF)�open)�isdir�split�)�
py31compat)�appdirs)�	packagingz&pkg_resources.extern.packaging.versionz)pkg_resources.extern.packaging.specifiersz+pkg_resources.extern.packaging.requirementsz&pkg_resources.extern.packaging.markers)�r)r�zPython 3.4 or later is requiredc@seZdZdZdS)�
PEP440Warningza
    Used when there is an issue with a version or specifier not complying with
    PEP 440.
    N��__name__�
__module__�__qualname__�__doc__�rr�:/usr/lib/python3.8/site-packages/pkg_resources/__init__.pyrxsrcCs8ztj�|�WStjjk
r2tj�|�YSXdS�N)r�version�Version�InvalidVersion�
LegacyVersion)�vrrr�
parse_versionsr#cKs"t��|�t�t�||��dSr)�globals�update�_state_vars�dict�fromkeys)Zvartype�kwrrr�_declare_state�sr*cCs8i}t�}t��D] \}}|d|||�||<q|S)NZ_sget_)r$r&�items��state�g�kr"rrr�__getstate__�s
r0cCs8t�}|��D]$\}}|dt|||||�q|S)NZ_sset_)r$r+r&r,rrr�__setstate__�sr1cCs|��Sr)�copy��valrrr�
_sget_dict�sr5cCs|��|�|�dSr)�clearr%��key�obr-rrr�
_sset_dict�sr:cCs|��Sr)r0r3rrr�_sget_object�sr;cCs|�|�dSr)r1r7rrr�_sset_object�sr<cGsdSrr��argsrrr�<lambda>��r?cCsbt�}t�|�}|dk	r^tjdkr^z&dd�t�dd��|�d�f}Wntk
r\YnX|S)aZReturn this platform's maximum compatible version.

    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils.  But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*.  To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.

    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    N�darwinzmacosx-%s-%s�.�r)	�get_build_platform�macosVersionString�match�sys�platform�join�_macosx_vers�group�
ValueError)�plat�mrrr�get_supported_platform�s

&rO�require�
run_script�get_provider�get_distribution�load_entry_point�
get_entry_map�get_entry_info�iter_entry_points�resource_string�resource_stream�resource_filename�resource_listdir�resource_exists�resource_isdir�declare_namespace�working_set�add_activation_listener�find_distributions�set_extraction_path�cleanup_resources�get_default_cache�Environment�
WorkingSet�ResourceManager�Distribution�Requirement�
EntryPoint�ResolutionError�VersionConflict�DistributionNotFound�UnknownExtra�ExtractionError�parse_requirements�	safe_name�safe_version�get_platform�compatible_platforms�yield_lines�split_sections�
safe_extra�to_filename�invalid_marker�evaluate_marker�ensure_directory�normalize_path�EGG_DIST�BINARY_DIST�SOURCE_DIST�
CHECKOUT_DIST�DEVELOP_DIST�IMetadataProvider�IResourceProvider�FileMetadata�PathMetadata�EggMetadata�
EmptyProvider�empty_provider�NullProvider�EggProvider�DefaultProvider�ZipProvider�register_finder�register_namespace_handler�register_loader_type�fixup_namespace_packagesr�PkgResourcesDeprecationWarning�run_main�AvailableDistributionsc@seZdZdZdd�ZdS)rkz.Abstract base for dependency resolution errorscCs|jjt|j�Sr)�	__class__r�reprr>��selfrrr�__repr__�szResolutionError.__repr__N)rrrrr�rrrrrk�sc@s<eZdZdZdZedd��Zedd��Zdd�Zd	d
�Z	dS)rlz�
    An already-installed version conflicts with the requested version.

    Should be initialized with the installed Distribution and the requested
    Requirement.
    z3{self.dist} is installed but {self.req} is requiredcCs
|jdS�Nrr=r�rrr�distszVersionConflict.distcCs
|jdS�Nrr=r�rrr�reqszVersionConflict.reqcCs|jjft��Sr��	_template�format�localsr�rrr�reportszVersionConflict.reportcCs|s|S|j|f}t|�S)zt
        If required_by is non-empty, return a version of self that is a
        ContextualVersionConflict.
        )r>�ContextualVersionConflict)r��required_byr>rrr�with_contextszVersionConflict.with_contextN)
rrrrr��propertyr�r�r�r�rrrrrls

c@s&eZdZdZejdZedd��ZdS)r�z�
    A VersionConflict that accepts a third parameter, the set of the
    requirements that required the installed Distribution.
    z by {self.required_by}cCs
|jdS)NrCr=r�rrrr�*sz%ContextualVersionConflict.required_byN)rrrrrlr�r�r�rrrrr�"s
r�c@sHeZdZdZdZedd��Zedd��Zedd��Zd	d
�Z	dd�Z
d
S)rmz&A requested distribution was not foundzSThe '{self.req}' distribution was not found and is required by {self.requirers_str}cCs
|jdSr�r=r�rrrr�5szDistributionNotFound.reqcCs
|jdSr�r=r�rrr�	requirers9szDistributionNotFound.requirerscCs|js
dSd�|j�S)Nzthe applicationz, )r�rIr�rrr�
requirers_str=sz"DistributionNotFound.requirers_strcCs|jjft��Srr�r�rrrr�CszDistributionNotFound.reportcCs|��Sr)r�r�rrr�__str__FszDistributionNotFound.__str__N)rrrrr�r�r�r�r�r�r�rrrrrm/s


c@seZdZdZdS)rnz>Distribution doesn't have an "extra feature" of the given nameNrrrrrrnJsz{}.{}rrC���cCs|t|<dS)aRegister `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
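# Hedged sketch of the registration hook described above.  ``DemoLoader`` is
# a made-up stand-in for a real PEP 302 loader type.
from pkg_resources import register_loader_type, DefaultProvider

class DemoLoader:
    """Hypothetical loader; real ones implement the PEP 302 protocol."""

# Modules whose __loader__ is a DemoLoader instance will now be served
# resources and metadata through DefaultProvider.
register_loader_type(DemoLoader, DefaultProvider)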
    N)�_provider_factories)Zloader_typeZprovider_factoryrrrr�XscCstt|t�r$t�|�p"tt|��dSztj|}Wn&tk
rXt	|�tj|}YnXt
|dd�}tt|�|�S)z?Return an IResourceProvider for the named module or requirementr�
__loader__N)
�
isinstancerir_�findrP�strrG�modules�KeyError�
__import__�getattr�
_find_adapterr�)ZmoduleOrReq�module�loaderrrrrRbs
cCsd|s\t��d}|dkrLd}tj�|�rLttd�rLt�|�}d|krL|d}|�|�	d��|dS)Nr�z0/System/Library/CoreServices/SystemVersion.plist�	readPlistZProductVersionrB)
rHZmac_ver�os�path�exists�hasattr�plistlibr��appendr)�_cacherZplistZ
plist_contentrrrrJos

rJcCsddd��||�S)NZppc)ZPowerPCZPower_Macintosh)�get)�machinerrr�_macosx_archsr�cCs~ddlm}|�}tjdkrz|�d�szz>t�}t��d�dd�}dt	|d�t	|d	�t
|�fWStk
rxYnX|S)
z�Return this platform's string for platform-specific distributions

    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    r)rsrAzmacosx-r� �_zmacosx-%d.%d-%sr)Z	sysconfigrsrGrH�
startswithrJr��uname�replace�intr�rL)rsrMrr�rrrrD�s

�rDzmacosx-(\d+)\.(\d+)-(.*)zdarwin-(\d+)\.(\d+)\.(\d+)-(.*)cCs�|dks|dks||krdSt�|�}|r�t�|�}|s�t�|�}|r�t|�d��}d|�d�|�d�f}|dkr||dks�|dkr�|d	kr�dSd
S|�d�|�d�ks�|�d�|�d�kr�d
St|�d��t|�d��kr�d
SdSd
S)z�Can code for the `provided` platform run on the `required` platform?

    Returns true if either platform is ``None``, or the platforms are equal.

    XXX Needs compatibility checks for Linux and other unixy OSes.
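# Hedged sketch of the compatibility check described above.
from pkg_resources import compatible_platforms, get_build_platform

plat = get_build_platform()
print(compatible_platforms(plat, plat))  # True: identical platform strings
print(compatible_platforms(None, plat))  # True: None is compatible with anything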
    NTrz%s.%srC�z10.3�z10.4Fr)rErF�darwinVersionStringr�rK)ZprovidedZrequiredZreqMacZprovMacZ
provDarwinZdversionZmacosversionrrrrt�s2


���cCs<t�d�j}|d}|��||d<t|�d�||�dS)z@Locate distribution `dist_spec` and run its `script_name` scriptrrrN�rG�	_getframe�	f_globalsr6rPrQ)Z	dist_spec�script_name�ns�namerrrrQ�s
cCs@t|tj�rt�|�}t|t�r(t|�}t|t�s<td|��|S)z@Return a current distribution object for a Requirement or stringz-Expected string, Requirement, or Distribution)r�r�string_typesri�parserRrh�	TypeError�r�rrrrS�s



cCst|��||�S)zDReturn `name` entry point of `group` for `dist` or raise ImportError)rSrT�r�rKr�rrrrT�scCst|��|�S)�=Return the entry point map for `group`, or the full entry map)rSrU)r�rKrrrrU�scCst|��||�S�z<Return the EntryPoint object for `group`+`name`, or ``None``)rSrVr�rrrrV�sc@s<eZdZdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
S)r�cCsdS)z;Does the package's distribution contain the named metadata?Nr�r�rrr�has_metadata�szIMetadataProvider.has_metadatacCsdS)z'The named metadata resource as a stringNrr�rrr�get_metadata�szIMetadataProvider.get_metadatacCsdS)z�Yield named metadata resource as list of non-blank non-comment lines

       Leading and trailing whitespace is stripped from each line, and lines
       with ``#`` as the first non-blank character are omitted.Nrr�rrr�get_metadata_lines�sz$IMetadataProvider.get_metadata_linescCsdS)z>Is the named metadata a directory?  (like ``os.path.isdir()``)Nrr�rrr�metadata_isdirsz IMetadataProvider.metadata_isdircCsdS)z?List of metadata names in the directory (like ``os.listdir()``)Nrr�rrr�metadata_listdirsz"IMetadataProvider.metadata_listdircCsdS)z=Execute the named script in the supplied namespace dictionaryNr)r��	namespacerrrrQ	szIMetadataProvider.run_scriptN)	rrrr�r�r�r�r�rQrrrrr��sc@s@eZdZdZdd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Z	dS)r�z3An object that provides access to package resourcescCsdS)zdReturn a true filesystem path for `resource_name`

        `manager` must be an ``IResourceManager``Nr��manager�
resource_namerrr�get_resource_filenamesz'IResourceProvider.get_resource_filenamecCsdS)ziReturn a readable file-like object for `resource_name`

        `manager` must be an ``IResourceManager``Nrr�rrr�get_resource_streamsz%IResourceProvider.get_resource_streamcCsdS)zmReturn a string containing the contents of `resource_name`

        `manager` must be an ``IResourceManager``Nrr�rrr�get_resource_stringsz%IResourceProvider.get_resource_stringcCsdS)z,Does the package contain the named resource?Nr�r�rrr�has_resourceszIResourceProvider.has_resourcecCsdS)z>Is the named resource a directory?  (like ``os.path.isdir()``)Nrr�rrrr]"sz IResourceProvider.resource_isdircCsdS)z?List of resource names in the directory (like ``os.listdir()``)Nrr�rrrr[%sz"IResourceProvider.resource_listdirN)
rrrrr�r�r�r�r]r[rrrrr�
sc@s�eZdZdZd'dd�Zedd��Zedd��Zd	d
�Zdd�Z	d
d�Z
d(dd�Zdd�Zdd�Z
d)dd�Zd*dd�Zd+dd�Zdd�Zd,dd �Zd!d"�Zd#d$�Zd%d&�ZdS)-rfzDA collection of active distributions on sys.path (or a similar list)NcCs>g|_i|_i|_g|_|dkr&tj}|D]}|�|�q*dS)z?Create working set from list of path entries (default=sys.path)N)�entries�
entry_keys�by_key�	callbacksrGr��	add_entry)r�r��entryrrr�__init__,szWorkingSet.__init__cCsb|�}zddlm}Wntk
r.|YSXz|�|�Wntk
r\|�|�YSX|S)z1
        Prepare the master working set.
        r)�__requires__)�__main__r��ImportErrorrPrl�_build_from_requirements)�cls�wsr�rrr�
_build_master9s
zWorkingSet._build_mastercCsf|g�}t|�}|�|t��}|D]}|�|�q"tjD]}||jkr8|�|�q8|jtjdd�<|S)zQ
        Build a working set from a requirement spec. Rewrites sys.path.
        N)rp�resolvere�addrGr�r�r�)r�Zreq_specr��reqs�distsr�r�rrrr�Ms

z#WorkingSet._build_from_requirementscCs<|j�|g�|j�|�t|d�D]}|�||d�q$dS)a�Add a path item to ``.entries``, finding any distributions on it

        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added.  `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
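# Hedged sketch: build a working set by hand and add one path entry.
# '/opt/plugins' is illustrative; a missing directory is silently skipped.
from pkg_resources import WorkingSet

ws = WorkingSet([])                        # start empty instead of sys.path
ws.add_entry('/opt/plugins')               # scanned via find_distributions()
print(ws.entries)                          # ['/opt/plugins']
print([dist.project_name for dist in ws])  # [] if nothing was found there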
        TFN)r��
setdefaultr�r�rar�)r�r�r�rrrr�cs
zWorkingSet.add_entrycCs|j�|j�|kS)z9True if `dist` is the active distribution for its project)r�r�r8�r�r�rrr�__contains__rszWorkingSet.__contains__cCs,|j�|j�}|dk	r(||kr(t||��|S)a�Find a distribution matching requirement `req`

        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`.  But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        N)r�r�r8rl)r�r�r�rrrr�vs

zWorkingSet.findcs��fdd�|D�S)aYield entry point objects from `group` matching `name`

        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
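# Hedged sketch using the module-level convenience wrapper for this method.
import pkg_resources

for ep in pkg_resources.iter_entry_points(group='console_scripts'):
    print(ep.name, '->', ep.module_name)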
        c3s8|]0}|�����D]}�dks*�|jkr|VqqdSr)rU�valuesr�)�.0r�r��rKr�rr�	<genexpr>�s
�z/WorkingSet.iter_entry_points.<locals>.<genexpr>r�r�rKr�rr�rrW�s�zWorkingSet.iter_entry_pointscCs>t�d�j}|d}|��||d<|�|�d�||�dS)z?Locate distribution for `requires` and run `script_name` scriptrrrNr�)r��requiresr�r�r�rrrrQ�s
zWorkingSet.run_scriptccsLi}|jD]<}||jkrq
|j|D] }||kr$d||<|j|Vq$q
dS)z�Yield distributions for non-duplicate projects in the working set

        The yield order is the order in which the items' path entries were
        added to the working set.
        rN)r�r�r�)r��seen�itemr8rrr�__iter__�s

zWorkingSet.__iter__TFcCs�|r|j|j||d�|dkr$|j}|j�|g�}|j�|jg�}|sV|j|jkrVdS||j|j<|j|krx|�|j�|j|kr�|�|j�|�|�dS)aAdd `dist` to working set, associated with `entry`

        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).

        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        �r�N)	�	insert_onr��locationr�r�r8r�r��
_added_new)r�r�r��insertr��keysZkeys2rrrr��s

zWorkingSet.addcCsxt|�ddd�}i}i}g}t�}	t�t�}
|�rt|�d�}||krHq.|	�||�sVq.|�|j�}|dk�r|j	�|j�}|dks�||kr�|r�|}
|dkr�|dkr�t
|j�}nt
g�}tg�}
|j
||
||d�}||j<|dkr�|
�|d�}t||��|�|�||k�r$|
|}t||��|��|�|j�ddd�}|�|�|D] }|
|�|j�|j|	|<�qHd||<q.|S)a�List all distributions needed to (recursively) meet `requirements`

        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
        if supplied, should be an ``Environment`` instance.  If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set.  `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.

        Unless `replace_conflicting=True`, raises a VersionConflict exception
        if
        any requirements are found on the path that have the correct name but
        the wrong version.  Otherwise, if an `installer` is supplied it will be
        invoked to obtain the correct version of the requirement and activate
        it.

        `extras` is a list of the extras to be used with these requirements.
        This is important because extra requirements may look like `my_req;
        extra = "my_extra"`, which would otherwise be interpreted as a purely
        optional requirement.  Instead, we want to be able to assert that these
        requirements are truly required.
        Nr�r)�replace_conflictingT)�list�
_ReqExtras�collections�defaultdict�set�pop�markers_passr�r8r�rer�rf�
best_matchrmr�rlr�r�extras�extendr��project_name)r��requirements�env�	installerrrZ	processedZbestZto_activateZ
req_extrasr�r�r�r�r�Z
dependent_reqZnew_requirementsZnew_requirementrrrr��sT


�




zWorkingSet.resolvecCs
t|�}|��i}i}|dkr4t|j�}||7}n||}|�g�}	tt|	j|��|D]�}
||
D]�}|��g}z|	�|||�}
WnBt	k
r�}z$|||<|r�WY�qfn
WY�qZW5d}~XYqfXtt|	j|
��|�
t�|
��qZqfqZt|�}|��||fS)asFind all activatable distributions in `plugin_env`

        Example usage::

            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            # add plugins+libs to sys.path
            map(working_set.add, distributions)
            # display errors
            print('Could not load', errors)

        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions.  If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.

        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.

        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies.  `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        N)
r�sortrer�r�rr��as_requirementr�rkr%r'r()r�Z
plugin_envZfull_envrZfallbackZplugin_projectsZ
error_infoZ
distributionsrZ
shadow_setrr�r�Z	resolveesr"rrr�find_plugins's4$




zWorkingSet.find_pluginscGs&|�t|��}|D]}|�|�q|S)a�Ensure that distributions matching `requirements` are activated

        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required.  The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        )r�rpr�)r�rZneededr�rrrrP{s	zWorkingSet.requirecCs8||jkrdS|j�|�|s"dS|D]}||�q&dS)z�Invoke `callback` for all distributions

        If `existing=True` (default),
        call on all existing ones, as well.
        N)r�r�)r��callback�existingr�rrr�	subscribe�s
zWorkingSet.subscribecCs|jD]}||�qdSr)r�)r�r�rrrrr�s
zWorkingSet._added_newcCs,|jdd�|j��|j��|jdd�fSr)r�r�r2r�r�r�rrrr0�s
�zWorkingSet.__getstate__cCs@|\}}}}|dd�|_|��|_|��|_|dd�|_dSr)r�r2r�r�r�)r�Ze_k_b_cr�r
r�r�rrrr1�s


zWorkingSet.__setstate__)N)N)NTF)NNFN)NNT)T)rrrrr��classmethodr�r�r�r�r�rWrQrr�r�rrPrrr0r1rrrrrf)s4





�
]�
T
c@seZdZdZddd�ZdS)r
z>
    Map each requirement to the extras that demanded it.
    Ncs2�fdd�|��d�|pdD�}�jp0t|�S)z�
        Evaluate markers for req against each extra that
        demanded it.

        Return False if the req has a marker and fails
        evaluation. Otherwise, return True.
        c3s|]}�j�d|i�VqdS)�extraN��marker�evaluate)r�r!�r�rrr��s�z*_ReqExtras.markers_pass.<locals>.<genexpr>rr)r�r#�any)r�r�rZextra_evalsrr%rr�s
�z_ReqExtras.markers_pass)N)rrrrrrrrrr
�sr
c@sxeZdZdZde�efdd�Zdd�Zdd�Zdd	d
�Z	dd�Z
d
d�Zddd�Zddd�Z
dd�Zdd�Zdd�ZdS)rez5Searchable snapshot of distributions on a search pathNcCs i|_||_||_|�|�dS)a!Snapshot distributions available on a search path

        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.

        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with.  If
        unspecified, it defaults to the current platform.  `python` is an
        optional string naming the desired version of Python (e.g. ``'3.6'``);
        it defaults to the current version.

        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        N)�_distmaprH�python�scan)r��search_pathrHr(rrrr��szEnvironment.__init__cCs2|jdkp|jdkp|j|jk}|o0t|j|j�S)z�Is distribution `dist` acceptable for this environment?

        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        N)r(�
py_versionrtrH)r�r�Z	py_compatrrr�can_add�s
�
�zEnvironment.can_addcCs|j|j�|�dS)z"Remove `dist` from the environmentN)r'r8�remover�rrrr-�szEnvironment.removecCs4|dkrtj}|D]}t|�D]}|�|�qqdS)adScan `search_path` for distributions usable in this environment

        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.  Only distributions conforming to
        the platform/python version defined at initialization are added.
        N)rGr�rar�)r�r*rr�rrrr)�s
zEnvironment.scancCs|��}|j�|g�S)aReturn a newest-to-oldest list of distributions for `project_name`

        Uses case-insensitive `project_name` comparison, assuming all the
        project's distributions use their project's name converted to all
        lowercase as their key.

        )�lowerr'r�)r�rZdistribution_keyrrr�__getitem__�szEnvironment.__getitem__cCsL|�|�rH|��rH|j�|jg�}||krH|�|�|jt�d�dd�dS)zLAdd `dist` if we ``can_add()`` it and it has not already been added
        �hashcmpT�r8�reverseN)	r,�has_versionr'r�r8r�r�operator�
attrgetter)r�r�r�rrrr�s

zEnvironment.addFcCsfz|�|�}Wntk
r,|s$�d}YnX|dk	r:|S||jD]}||krD|SqD|�||�S)a�Find distribution best matching `req` and usable on `working_set`

        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active.  (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.)  If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`.  If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        N)r�rlr8�obtain)r�r�r_rrr�rrrrs

zEnvironment.best_matchcCs|dk	r||�SdS)a�Obtain a distribution matching `requirement` (e.g. via download)

        Obtain a distro that matches requirement (e.g. via download).  In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead.  This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument.Nr)r�Zrequirementrrrrr6+s	zEnvironment.obtainccs"|j��D]}||r
|Vq
dS)z=Yield the unique project names of the available distributionsN)r'r
�r�r8rrrr7szEnvironment.__iter__cCsVt|t�r|�|�n<t|t�rD|D]}||D]}|�|�q0q$ntd|f��|S)z2In-place addition of a distribution or environmentzCan't add %r to environment)r�rhr�rer�)r��otherZprojectr�rrr�__iadd__=s

zEnvironment.__iadd__cCs*|jgddd�}||fD]}||7}q|S)z4Add an environment or distribution to an environmentN)rHr(�r�)r�r8�newrrrr�__add__Is
zEnvironment.__add__)N)NF)N)rrrrrO�PY_MAJORr�r,r-r)r/r�rr6rr9r<rrrrre�s"�


�

c@seZdZdZdS)roaTAn error occurred extracting a resource

    The following attributes are available from instances of this exception:

    manager
        The resource manager that raised this exception

    cache_path
        The base directory for resource extraction

    original_error
        The exception instance that caused extraction to fail
    NrrrrrroUsc@s�eZdZdZdZdd�Zdd�Zdd�Zd	d
�Zdd�Z	d
d�Z
dd�Zdd�Zddd�Z
edd��Zdd�Zdd�Zd dd�ZdS)!rgz'Manage resource extraction and packagesNcCs
i|_dSr)�cached_filesr�rrrr�iszResourceManager.__init__cCst|��|�S)zDoes the named resource exist?)rRr��r�Zpackage_or_requirementr�rrrr\lszResourceManager.resource_existscCst|��|�S)z,Is the named resource an existing directory?)rRr]r?rrrr]ps�zResourceManager.resource_isdircCst|��||�S)z4Return a true filesystem path for specified resource)rRr�r?rrrrZvs�z!ResourceManager.resource_filenamecCst|��||�S)z9Return a readable file-like object for specified resource)rRr�r?rrrrY|s�zResourceManager.resource_streamcCst|��||�S)z%Return specified resource as a string)rRr�r?rrrrX�s�zResourceManager.resource_stringcCst|��|�S)z1List the contents of the named resource directory)rRr[r?rrrr[�s�z ResourceManager.resource_listdircCsRt��d}|jpt�}t�d���}t|jft	���}||_
||_||_|�dS)z5Give an error message for problems extracting file(s)ra
            Can't extract file(s) to egg cache

            The following error occurred while trying to extract file(s)
            to the Python egg cache:

              {old_exc}

            The Python egg cache directory is currently set to:

              {cache_path}

            Perhaps your account does not have write access to this directory?
            You can change the cache directory by setting the PYTHON_EGG_CACHE
            environment variable to point to an accessible directory.
            N)
rG�exc_info�extraction_pathrd�textwrap�dedent�lstripror�r�r��
cache_pathZoriginal_error)r��old_excrE�tmpl�errrrr�extraction_error�sz ResourceManager.extraction_errorrcCsf|jp
t�}tjj||df|��}zt|�Wntk
rL|��YnX|�|�d|j	|<|S)a�Return absolute location in cache for `archive_name` and `names`

        The parent directory of the resulting path will be created if it does
        not already exist.  `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension.  `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.

        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        z-tmpr)
rArdr�r�rI�_bypass_ensure_directory�	ExceptionrI�_warn_unsafe_extraction_pathr>)r�Zarchive_name�namesZextract_pathZtarget_pathrrr�get_cache_path�s


zResourceManager.get_cache_pathcCsVtjdkr|�tjd�sdSt�|�j}|tj@s>|tj@rRd|}t�	|t
�dS)aN
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.

        See Distribute #375 for more details.
        �ntZwindirNz�%s is writable by group/others and vulnerable to attack when used with get_resource_filename. Consider a more secure location (set with .set_extraction_path or the PYTHON_EGG_CACHE environment variable).)r�r�r��environ�stat�st_mode�S_IWOTH�S_IWGRP�warnings�warn�UserWarning)r��mode�msgrrrrL�s
��z,ResourceManager._warn_unsafe_extraction_pathcCs.tjdkr*t�|�jdBd@}t�||�dS)a4Perform any platform-specific postprocessing of `tempname`

        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.

        Resource providers should call this method ONLY after successfully
        extracting a compressed resource.  They must NOT call it on resources
        that are already in the filesystem.

        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        �posiximi�N)r�r�rQrR�chmod)r�Ztempname�filenamerXrrr�postprocess�s
zResourceManager.postprocesscCs|jrtd��||_dS)a�Set the base path where resources will be extracted to, if needed.

        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``.  (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks.  See that routine's documentation for more
        details.)

        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``.  You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done.  There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.

        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        z5Can't change extraction path, files already extractedN)r>rLrA�r�r�rrrrb�s
�z#ResourceManager.set_extraction_pathFcCsdS)aB
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process.  This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        Nr)r��forcerrrrcsz!ResourceManager.cleanup_resources)r)F)rrrrrAr�r\r]rZrYrXr[rIrN�staticmethodrLr]rbrcrrrrrges 

cCstj�d�ptjdd�S)z�
    Return the ``PYTHON_EGG_CACHE`` environment variable
    or a platform-relevant user cache dir for an app
    named "Python-Eggs".
    ZPYTHON_EGG_CACHEzPython-Eggs)Zappname)r�rPr�rZuser_cache_dirrrrrrds
�cCst�dd|�S)z�Convert an arbitrary string to a standard distribution name

    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    �[^A-Za-z0-9.]+�-)�re�subr�rrrrq%scCsJzttj�|��WStjjk
rD|�dd�}t�dd|�YSXdS)zB
    Convert an arbitrary string to a standard version string
    r�rBrarbN)r�rrrr r�rcrd)rrrrrr-s
cCst�dd|���S)z�Convert an arbitrary string to a standard 'extra' name

    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    z[^A-Za-z0-9.-]+r�)rcrdr.)r!rrrrw9scCs|�dd�S)z|Convert a project or version name to its filename-escaped form

    Any '-' characters are currently replaced with '_'.
    rbr�rr�rrrrxBsc
CsHzt|�Wn6tk
rB}zd|_d|_|WY�Sd}~XYnXdS)zo
    Validate text as a PEP 508 environment marker; return an exception
    if invalid or False otherwise.
    NF)rz�SyntaxErrorr\�lineno)�text�errrryJsc
CsJztj�|�}|��WStjjk
rD}zt|��W5d}~XYnXdS)z�
    Evaluate a PEP 508 environment marker.
    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.

    This implementation uses the 'pyparsing' module.
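# Hedged sketch of the two marker helpers described above.
from pkg_resources import evaluate_marker, invalid_marker

print(evaluate_marker('python_version >= "2.7"'))  # True on any supported Python
print(bool(invalid_marker('not a valid marker')))  # True: a SyntaxError is returned
print(invalid_marker('os_name == "posix"'))        # False: the marker is valid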
    N)rZmarkersZMarkerr$Z
InvalidMarkerre)rgr!r#rhrrrrzXs

c@s�eZdZdZdZdZdZdd�Zdd�Zdd�Z	d	d
�Z
dd�Zd
d�Zdd�Z
dd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd �Zd!d"�Zd#d$�Zd%d&�Zed'd(��Zd)d*�ZdS)+r�zETry to implement resources and metadata for arbitrary PEP 302 loadersNcCs(t|dd�|_tj�t|dd��|_dS)Nr��__file__r�)r�r�r�r��dirname�module_path�r�r�rrrr�nszNullProvider.__init__cCs|�|j|�Sr)�_fnrk�r�r�r�rrrr�rsz"NullProvider.get_resource_filenamecCst�|�||��Sr)�io�BytesIOr�rnrrrr�usz NullProvider.get_resource_streamcCs|�|�|j|��Sr)�_getrmrkrnrrrr�xsz NullProvider.get_resource_stringcCs|�|�|j|��Sr)�_hasrmrk�r�r�rrrr�{szNullProvider.has_resourcecCs|�|j|�Sr)rm�egg_info�r�r�rrr�_get_metadata_path~szNullProvider._get_metadata_pathcCs |js|jS|�|�}|�|�Sr)rtrvrr�r�r�r�rrrr��s
zNullProvider.has_metadatac
Cst|js
dS|�|�}|�|�}tjr(|Sz|�d�WStk
rn}z|jd�||�7_�W5d}~XYnXdS)Nr��utf-8z in {} file at path: {})	rtrvrqr�PY2�decode�UnicodeDecodeError�reasonr�)r�r�r��value�excrrrr��s

zNullProvider.get_metadatacCst|�|��Sr�rur�rurrrr��szNullProvider.get_metadata_linescCs|�|�|j|��Sr)�_isdirrmrkrsrrrr]�szNullProvider.resource_isdircCs|jo|�|�|j|��Sr)rtr�rmrurrrr��szNullProvider.metadata_isdircCs|�|�|j|��Sr)�_listdirrmrkrsrrrr[�szNullProvider.resource_listdircCs|jr|�|�|j|��SgSr)rtr�rmrurrrr��szNullProvider.metadata_listdirc
Cs�d|}|�|�s$tdjft����|�|��dd�}|�dd�}|�|j|�}||d<tj	�
|�r�t|���}t
||d�}t|||�n>dd	lm}t|�d|�d�|f||<t
||d�}	t|	||�dS)
Nzscripts/z<Script {script!r} not found in metadata at {self.egg_info!r}z
�
�
ri�execr)�cache)r�rkr�r�r�r�rmrtr�r�r�r�read�compiler��	linecacher��lenr)
r�r�r�ZscriptZscript_textZscript_filename�source�coder�Zscript_coderrrrQ�s.
���zNullProvider.run_scriptcCstd��dS�Nz9Can't perform this operation for unregistered loader type��NotImplementedErrorr^rrrrr�s�zNullProvider._hascCstd��dSr�r�r^rrrr��s�zNullProvider._isdircCstd��dSr�r�r^rrrr��s�zNullProvider._listdircCs*|�|�|r&tjj|f|�d���S|S)N�/)�_validate_resource_pathr�r�rIr)r��baser�rrrrm�s
zNullProvider._fncCsptjj|�tj�kp&t�|�p&t�|�}|s0dSd}t�|�rPt�|�sPt|��t	j
|dd�dtdd�dS)aO
        Validate the resource paths according to the docs.
        https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access

        >>> warned = getfixture('recwarn')
        >>> warnings.simplefilter('always')
        >>> vrp = NullProvider._validate_resource_path
        >>> vrp('foo/bar.txt')
        >>> bool(warned)
        False
        >>> vrp('../foo/bar.txt')
        >>> bool(warned)
        True
        >>> warned.clear()
        >>> vrp('/foo/bar.txt')
        >>> bool(warned)
        True
        >>> vrp('foo/../../bar.txt')
        >>> bool(warned)
        True
        >>> warned.clear()
        >>> vrp('foo/f../bar.txt')
        >>> bool(warned)
        False

        Windows path separators are straight-up disallowed.
        >>> vrp(r'\foo/bar.txt')
        Traceback (most recent call last):
        ...
        ValueError: Use of .. or absolute path in a resource path is not allowed.

        >>> vrp(r'C:\foo/bar.txt')
        Traceback (most recent call last):
        ...
        ValueError: Use of .. or absolute path in a resource path is not allowed.

        Blank values are allowed

        >>> vrp('')
        >>> bool(warned)
        False

        Non-string values are not.

        >>> vrp(None)
        Traceback (most recent call last):
        ...
        AttributeError: ...
        Nz=Use of .. or absolute path in a resource path is not allowed.r�z/ and will raise exceptions in a future release.r��
stacklevel)r�r��pardirr�	posixpath�sep�isabs�ntpathrLrUrV�DeprecationWarning)r�ZinvalidrYrrrr��s6���z$NullProvider._validate_resource_pathcCs$t|jd�r|j�|�Std��dS)N�get_dataz=Can't perform this operation for loaders without 'get_data()')r�r�r�r�r^rrrrqs
�zNullProvider._get)rrrr�egg_namertr�r�r�r�r�r�rvr�r�r�r]r�r[r�rQrrr�r�rmr`r�rqrrrrr�gs2
Jc@s eZdZdZdd�Zdd�ZdS)r�z&Provider based on a virtual filesystemcCst�||�|��dSr)r�r��
_setup_prefixrlrrrr�-szEggProvider.__init__cCsZ|j}d}||krVt|�r@tj�|�|_tj�|d�|_||_qV|}tj�	|�\}}q
dS)N�EGG-INFO)
rk�_is_egg_pathr�r��basenamer�rIrt�egg_rootr)r�r��oldr�rrrr�1szEggProvider._setup_prefixN)rrrrr�r�rrrrr�*sc@sDeZdZdZdd�Zdd�Zdd�Zdd	�Zd
d�Ze	dd
��Z
dS)r�z6Provides access to package resources in the filesystemcCstj�|�Sr)r�r�r�r^rrrrrCszDefaultProvider._hascCstj�|�Sr)r�r�r
r^rrrr�FszDefaultProvider._isdircCs
t�|�Sr)r��listdirr^rrrr�IszDefaultProvider._listdircCst|�|j|�d�S�N�rb)rrmrkrnrrrr�Lsz#DefaultProvider.get_resource_streamc
Cs*t|d��}|��W5QR�SQRXdSr�)rr�)r�r��streamrrrrqOszDefaultProvider._getcCs,d}|D]}tt|td��}t||�qdS)N)�SourceFileLoader�SourcelessFileLoader)r��importlib_machinery�typer�)r�Zloader_namesr�Z
loader_clsrrr�	_registerSszDefaultProvider._registerN)rrrrrrr�r�r�rqr r�rrrrr�@sc@s8eZdZdZdZdd�ZZdd�Zdd�Zd	d
�Z	dS)r�z.Provider that returns nothing for all requestsNcCsdS�NFrr^rrrr?cr@zEmptyProvider.<lambda>cCsdS�Nr�rr^rrrrqeszEmptyProvider._getcCsgSrrr^rrrr�hszEmptyProvider._listdircCsdSrrr�rrrr�kszEmptyProvider.__init__)
rrrrrkr�rrrqr�r�rrrrr�^sc@s eZdZdZedd��ZeZdS)�ZipManifestsz
    zip manifest builder
    c
s@t�|��,��fdd����D�}t|�W5QR�SQRXdS)a
        Build a dictionary similar to the zipimport directory
        caches, except instead of tuples, store ZipInfo objects.

        Use a platform-specific path separator (os.sep) for the path keys
        for compatibility with pypy on Windows.
        c3s&|]}|�dtj���|�fVqdS)r�N)r�r�r�Zgetinfo�r�r��Zzfilerrr��s��z%ZipManifests.build.<locals>.<genexpr>N)�zipfileZZipFileZnamelistr')r�r�r+rr�r�buildws
	
�zZipManifests.buildN)rrrrr r��loadrrrrr�rs
r�c@s$eZdZdZe�dd�Zdd�ZdS)�MemoizedZipManifestsz%
    Memoized zipfile manifests.
    �manifest_modzmanifest mtimecCsRtj�|�}t�|�j}||ks.||j|krH|�|�}|�||�||<||jS)zW
        Load a manifest at path or return a suitable manifest already loaded.
        )	r�r��normpathrQ�st_mtime�mtimer�r��manifest)r�r�r�r�rrrr��s
zMemoizedZipManifests.loadN)rrrrr�
namedtupler�r�rrrrr��sr�c@s�eZdZdZdZe�Zdd�Zdd�Zdd�Z	e
d	d
��Zdd�Ze
d
d��Zdd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd �ZdS)!r�z"Resource support for zips and eggsNcCs t�||�|jjtj|_dSr)r�r�r��archiver�r��zip_prerlrrrr��szZipProvider.__init__cCsP|�tj�}||jjkrdS|�|j�r:|t|j�d�Std||jf��dS)Nr��%s is not a subpath of %s)	�rstripr�r�r�r�r�r�r��AssertionError�r��fspathrrr�
_zipinfo_name�s�zZipProvider._zipinfo_namecCsP|j|}|�|jtj�r:|t|j�dd��tj�Std||jf��dS)Nrr�)r�r�r�r�r�r�rr�)r��zip_pathr�rrr�_parts�s
�zZipProvider._partscCs|j�|jj�Sr)�_zip_manifestsr�r�r�r�rrr�zipinfo�szZipProvider.zipinfocCs\|jstd��|�|�}|��}d�|�|��|krP|D]}|�||�|��q8|�||�S)Nz5resource_filename() only supported for .egg, not .zipr�)r�r��_resource_to_zip�_get_eager_resourcesrIr��_extract_resource�
_eager_to_zip)r�r�r�r��eagersr�rrrr��s�
z!ZipProvider.get_resource_filenamecCs"|j}|jd}t�|�}||fS)N)rrr�)�	file_size�	date_time�time�mktime)Zzip_stat�sizer��	timestamprrr�_get_date_and_size�s

zZipProvider._get_date_and_sizec
Csx||��kr@|��|D]}|�|tj�||��}qtj�|�S|�|j|�\}}ts`t	d��z�|�
|j|�|��}|�
||�r�|WStdtj�|�d�\}}	t�||j�|��t�|�t|	||f�|�|	|�zt|	|�Wnhtjk
�rNtj�|��rH|�
||��r |YWStjdk�rHt|�t|	|�|YWS�YnXWn tjk
�rr|��YnX|S)Nz>"os.rename" and "os.unlink" are not supported on this platformz	.$extract)�dirrO)�_indexr�r�r�rIrjr�r��
WRITE_SUPPORT�IOErrorrNr�r��_is_current�_mkstemp�writer�r��closerr]r
�error�isfiler�rrI)
r�r�r�r�Zlastr�r�Z	real_pathZoutfZtmpnamrrrr��sN��
�




zZipProvider._extract_resourcec		Csx|�|j|�\}}tj�|�s$dSt�|�}|j|ksB|j|krFdS|j�	|�}t
|d��}|��}W5QRX||kS)zK
        Return True if the file_path is current for this zip_path
        Fr�)r�r�r�r�r�rQ�st_sizer�r�r�rr�)	r�Z	file_pathr�r�r�rQZzip_contents�fZ
file_contentsrrrr�s
zZipProvider._is_currentcCs>|jdkr8g}dD]}|�|�r|�|�|��q||_|jS)N)znative_libs.txtzeager_resources.txt)r�r�rr�)r�r�r�rrrr�"s

z ZipProvider._get_eager_resourcesc	Cs�z|jWStk
r�i}|jD]V}|�tj�}|r"tj�|dd��}||krh||�|d�q"q2|��g||<q2q"||_|YSXdS)Nr�)	Z	_dirindex�AttributeErrorr�rr�r�rIr�r)r�Zindr��parts�parentrrrr�+s
zZipProvider._indexcCs |�|�}||jkp||��kSr)r�r�r�)r�r�r�rrrrr<s
zZipProvider._hascCs|�|�|��kSr)r�r�r�rrrr�@szZipProvider._isdircCst|���|�|�d��S�Nr)rr�r�r�r�rrrr�CszZipProvider._listdircCs|�|�|j|��Sr)r�rmr�rsrrrr�FszZipProvider._eager_to_zipcCs|�|�|j|��Sr)r�rmrkrsrrrr�IszZipProvider._resource_to_zip)rrrrr�r�r�r�r�r�r�r�r�r`r�r�r�r�r�rrr�r�r�r�rrrrr��s(



7	c@s@eZdZdZdd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Z	dS)r�a*Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    cCs
||_dSr�r�r^rrrr�\szFileMetadata.__init__cCs|jSrr�rurrrrv_szFileMetadata._get_metadata_pathcCs|dkotj�|j�S)N�PKG-INFO)r�r�r�rurrrr�bszFileMetadata.has_metadatac	CsD|dkrtd��tj|jddd��}|��}W5QRX|�|�|S)Nr�z(No metadata except PKG-INFO is availablerxr�)�encoding�errors)r�rorr�r��_warn_on_replacement)r�r�r��metadatarrrr�es
zFileMetadata.get_metadatacCs2d�d�}||kr.d}|jft��}t�|�dS)Ns�rxz2{self.path} could not be properly decoded in UTF-8)rzr�r�rUrV)r�r�Zreplacement_charrGrYrrrr�ns

z!FileMetadata._warn_on_replacementcCst|�|��Srrrurrrr�vszFileMetadata.get_metadata_linesN)
rrrrr�rvr�r�r�r�rrrrr�Ps	c@seZdZdZdd�ZdS)r�asMetadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir, project_name=dist_name, metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    cCs||_||_dSr)rkrt)r�r�rtrrrr��szPathMetadata.__init__N�rrrrr�rrrrr�zsc@seZdZdZdd�ZdS)r�z Metadata provider for .egg filescCsD|jtj|_||_|jr0tj�|j|j�|_n|j|_|�	�dS)z-Create a metadata provider from a zipimporterN)
r�r�r�r�r��prefixr�rIrkr�)r��importerrrrr��szEggMetadata.__init__Nr�rrrrr��sr'��_distribution_finderscCs|t|<dS)axRegister `distribution_finder` to find distributions in sys.path items

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
[binary data: compiled Python bytecode (.pyc) for the pkg_resources package; continuation of the preceding archive entry]

_vendor/__init__.py  (empty file)

_vendor/packaging/__init__.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

from .__about__ import (
    __author__, __copyright__, __email__, __license__, __summary__, __title__,
    __uri__, __version__
)

__all__ = [
    "__title__", "__summary__", "__uri__", "__version__", "__author__",
    "__email__", "__license__", "__copyright__",
]

_vendor/packaging/_compat.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import sys


PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

# flake8: noqa

if PY3:
    string_types = str,
else:
    string_types = basestring,


def with_metaclass(meta, *bases):
    """
    Create a base class with a metaclass.
    """
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})
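

# Illustrative usage sketch (not from the upstream module; ``ExampleBase`` and
# ``ExampleABC`` are hypothetical names): the dummy-metaclass trick above lets a
# single ``class`` statement pick up a real metaclass under both the Python 2
# and Python 3 syntaxes.
if __name__ == "__main__":
    import abc

    class ExampleBase(object):
        pass

    class ExampleABC(with_metaclass(abc.ABCMeta, ExampleBase)):
        """Built by abc.ABCMeta on both Python 2 and Python 3."""

    # The temporary class returned by with_metaclass() never appears in the
    # result; only the real metaclass and the real bases do.
    assert type(ExampleABC) is abc.ABCMeta
    assert ExampleABC.__bases__ == (ExampleBase,)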

_vendor/packaging/_structures.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function


class Infinity(object):

    def __repr__(self):
        return "Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __lt__(self, other):
        return False

    def __le__(self, other):
        return False

    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not isinstance(other, self.__class__)

    def __gt__(self, other):
        return True

    def __ge__(self, other):
        return True

    def __neg__(self):
        return NegativeInfinity

Infinity = Infinity()


class NegativeInfinity(object):

    def __repr__(self):
        return "-Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __lt__(self, other):
        return True

    def __le__(self, other):
        return True

    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not isinstance(other, self.__class__)

    def __gt__(self, other):
        return False

    def __ge__(self, other):
        return False

    def __neg__(self):
        return Infinity

NegativeInfinity = NegativeInfinity()
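

# Illustrative sketch (not part of the upstream module): these singletons compare
# like positive and negative infinity against any other value, which makes them
# useful as sentinel components in comparison keys, for example to force a
# missing segment of a version key to order before or after every real value.
# The list below is a made-up example, not packaging's actual comparison key.
if __name__ == "__main__":
    values = [Infinity, 3, NegativeInfinity, 0, -7]
    values.sort()
    assert values[0] is NegativeInfinity   # orders below every number
    assert values[-1] is Infinity          # orders above every number
    assert -Infinity is NegativeInfinity and -NegativeInfinity is Infinity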

_vendor/packaging/__pycache__/requirements.cpython-38.pyc  [binary compiled bytecode]
_vendor/packaging/__pycache__/markers.cpython-38.pyc  [binary compiled bytecode]
_vendor/packaging/__pycache__/__about__.cpython-38.pyc  [binary compiled bytecode]
_vendor/packaging/__pycache__/__init__.cpython-38.opt-1.pyc  [binary compiled bytecode]
_vendor/packaging/__pycache__/_structures.cpython-38.pyc  [binary compiled bytecode]
_vendor/packaging/__pycache__/version.cpython-38.pyc  [binary compiled bytecode]
_vendor/packaging/__pycache__/_compat.cpython-38.pyc  [binary compiled bytecode]
_vendor/packaging/__pycache__/specifiers.cpython-38.pyc  [binary compiled bytecode]
_vendor/packaging/__pycache__/_structures.cpython-38.opt-1.pyc  [binary compiled bytecode]
_vendor/packaging/__pycache__/_compat.cpython-38.opt-1.pyc  [binary compiled bytecode]
_vendor/packaging/__pycache__/__about__.cpython-38.opt-1.pyc  [binary compiled bytecode]
_vendor/packaging/__pycache__/utils.cpython-38.opt-1.pyc  [binary compiled bytecode]
_vendor/packaging/__pycache__/specifiers.cpython-38.opt-1.pyc  [binary compiled bytecode]
                # release segment.
                (?<=~=)               # Only match for the compatible operator

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
            |
            (?:
                # All other operators only allow a sub set of what the
                # (non)equality operators do. Specifically they do not allow
                # local versions to be specified nor do they allow the prefix
                # matching wild cards.
                (?<!==|!=|~=)         # We have special cases for these
                                      # operators so we want to make sure they
                                      # don't match here.

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
        )
        rRrSZ
compatiblerTrUrVrWrXrYZ	arbitrary)�~=rZr[r\r]r^r_�===cCsNd�tt�dd�t|���dd��}|d7}|�d�||�oL|�d�||�S)N�.cSs|�d�o|�d�S)NZpostZdev)�
startswith��xrrr�<lambda>�s�z/Specifier._compare_compatible.<locals>.<lambda>����.*r]rZ)�join�list�	itertools�	takewhile�_version_splitrB)rrbr2�prefixrrr�_compare_compatible�s�����zSpecifier._compare_compatiblecCsp|�d�rPt|j�}t|dd��}tt|��}|dt|��}t||�\}}nt|�}|jsht|j�}||kS)Nr}���)�endswithrZpublicr�r7�len�_pad_version�localrarrrrc�s


zSpecifier._compare_equalcCs|�||�Sr:)rcrarrrrd�szSpecifier._compare_not_equalcCs|t|�kSr:�rrarrrre�sz"Specifier._compare_less_than_equalcCs|t|�kSr:r�rarrrrf�sz%Specifier._compare_greater_than_equalcCs<t|�}||ksdS|js8|jr8t|j�t|j�kr8dSdS�NFT)rrK�base_versionrarrrrg�szSpecifier._compare_less_thancCs^t|�}||ksdS|js8|jr8t|j�t|j�kr8dS|jdk	rZt|j�t|j�krZdSdSr�)rZis_postreleaser�r�rarrrrh�s
zSpecifier._compare_greater_thancCst|���t|���kSr:)r7�lowerrarrr�_compare_arbitraryszSpecifier._compare_arbitrarycCsR|jdk	r|jS|j\}}|dkrN|dkr@|�d�r@|dd�}t|�jrNdSdS)N)rZr]r\rurvrZr}r�TF)r1r0r�r
rK)rr)r*rrrrs


zSpecifier.prereleasescCs
||_dSr:rFrrrrrsN)rr
rrirjrkrlrmr+r@rsr�rcrdrerfrgrhr�rPrr&rrrrrtsD�_

��

"





rtz^([0-9]+)((?:a|b|c|rc)[0-9]+)$cCs@g}|�d�D],}t�|�}|r0|�|���q|�|�q|S)Nrw)�split�
_prefix_regexr,�extend�groupsrL)r*�resultrr3rrrr�'s
r�c
Cs�gg}}|�tt�dd�|���|�tt�dd�|���|�|t|d�d��|�|t|d�d��|�ddgtdt|d�t|d���|�ddgtdt|d�t|d���ttj|��ttj|��fS)NcSs|��Sr:��isdigitryrrrr{6�z_pad_version.<locals>.<lambda>cSs|��Sr:r�ryrrrr{7r�rr�0)rLrr�r�r��insert�max�chain)�left�rightZ
left_splitZright_splitrrrr�2s 
"�"��r�c@s�eZdZddd�Zdd�Zdd�Zd	d
�Zdd�Zd
d�Zdd�Z	dd�Z
dd�Zedd��Z
e
jdd��Z
dd�Zddd�Zd dd�ZdS)!�SpecifierSetr(Nc	Csndd�|�d�D�}t�}|D]:}z|�t|��Wqtk
rV|�t|��YqXqt|�|_||_dS)NcSsg|]}|��r|���qSr)r/��.0�srrr�
<listcomp>Rsz)SpecifierSet.__init__.<locals>.<listcomp>�,)	r��set�addrtrrQ�	frozenset�_specsr1)rZ
specifiersrZparsed�	specifierrrrr4Os
zSpecifierSet.__init__cCs*|jdk	rd�|j�nd}d�t|�|�S)Nr5r(z<SpecifierSet({0!r}{1})>)r1r-rr7r8rrrr9ds
��zSpecifierSet.__repr__cCsd�tdd�|jD���S)Nr�css|]}t|�VqdSr:)r7r�rrr�	<genexpr>nsz'SpecifierSet.__str__.<locals>.<genexpr>)r~�sortedr�rrrrrmszSpecifierSet.__str__cCs
t|j�Sr:)r;r�rrrrrpszSpecifierSet.__hash__cCs�t|t�rt|�}nt|t�s"tSt�}t|j|jB�|_|jdkrX|jdk	rX|j|_n<|jdk	rv|jdkrv|j|_n|j|jkr�|j|_ntd��|S)NzFCannot combine SpecifierSets with True and False prerelease overrides.)r=rr�r>r�r�r1�
ValueError)rrr�rrr�__and__ss 





�zSpecifierSet.__and__cCsFt|t�rt|�}n&t|t�r,tt|��}nt|t�s:tS|j|jkSr:�r=rr�r'r7r>r�rrrrr�s



zSpecifierSet.__eq__cCsFt|t�rt|�}n&t|t�r,tt|��}nt|t�s:tS|j|jkSr:r�rrrrr�s



zSpecifierSet.__ne__cCs
t|j�Sr:)r�r�rrrr�__len__�szSpecifierSet.__len__cCs
t|j�Sr:)�iterr�rrrr�__iter__�szSpecifierSet.__iter__cCs.|jdk	r|jS|jsdStdd�|jD��S)Ncss|]}|jVqdSr:�rr�rrrr��sz+SpecifierSet.prereleases.<locals>.<genexpr>)r1r��anyrrrrr�s

zSpecifierSet.prereleasescCs
||_dSr:rFrrrrr�scCs
|�|�Sr:rGrHrrrrI�szSpecifierSet.__contains__csLt�ttf�st����dkr$|j��s2�jr2dSt��fdd�|jD��S)NFc3s|]}|j��d�VqdS)r�NrGr��rrrrr��s�z(SpecifierSet.contains.<locals>.<genexpr>)r=r	rr
rrK�allr�rrr�rr �s
�zSpecifierSet.containscCs�|dkr|j}|jr6|jD]}|j|t|�d�}q|Sg}g}|D]P}t|ttf�s^t|�}n|}t|t�rnqB|jr�|s�|s�|�	|�qB|�	|�qB|s�|r�|dkr�|S|SdS)Nr�)
rr�r"�boolr=r	rr
rKrL)rr!rr2ZfilteredrMrrOrrrr"�s*



zSpecifierSet.filter)r(N)N)N)rr
rr4r9rrr�rrr�r�rPrr&rIr r"rrrrr�Ms 
	




r�)Z
__future__rrrr#rqr�rjZ_compatrrr*rr	r
r�r�ABCMeta�objectrr'rQrsrtrkr�r�r�r�rrrr�<module>s&9	4	
PK�V[Иk�&&?_vendor/packaging/__pycache__/requirements.cpython-38.opt-1.pycnu�[���U

�Qab�@srddlmZmZmZddlZddlZddlmZmZm	Z	m
Z
ddlmZmZm
Z
mZmZddlmZddlmZddlmZmZdd	lmZmZmZGd
d�de�Zeejej�Z ed��!�Z"ed
��!�Z#ed��!�Z$ed��!�Z%ed��!�Z&ed��!�Z'ed��!�Z(ed�Z)e ee)�e BZ*ee ee*��Z+e+d�Z,e+Z-ed�d�Z.e(e.Z/e-ee&e-�Z0e"e
e0�e#d�Z1eej2ej3ej4B�Z5eej2ej3ej4B�Z6e5e6AZ7ee7ee&e7�ddd�d�Z8e
e$e8e%e8B�Z9e9�:dd��e	e9�d�Z;e;�:dd��e	e��d�Ze�:d d��e'Z<e<eZ=e;e
e=�Z>e/e
e=�Z?e,e
e1�e?e>BZ@ee@eZAGd!d"�d"eB�ZCdS)#�)�absolute_import�division�print_functionN)�stringStart�	stringEnd�originalTextFor�ParseException)�
ZeroOrMore�Word�Optional�Regex�Combine)�Literal)�parse�)�MARKER_EXPR�Marker)�LegacySpecifier�	Specifier�SpecifierSetc@seZdZdZdS)�InvalidRequirementzJ
    An invalid requirement was found, users should refer to PEP 508.
    N)�__name__�
__module__�__qualname__�__doc__�rr�P/usr/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/requirements.pyrsr�[�]�(�)�,�;�@z-_.�namez[^ ]+�url�extrasF)Z
joinStringZadjacent�	_raw_speccCs
|jpdS)N�)r'��s�l�trrr�<lambda>6�r-�	specifiercCs|dS)Nrrr)rrrr-9r.�markercCst||j|j��S)N)rZ_original_startZ
_original_endr)rrrr-=r.c@s(eZdZdZdd�Zdd�Zdd�ZdS)	�Requirementz�Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    c
Cs�zt�|�}Wn@tk
rN}z"td�||j|jd����W5d}~XYnX|j|_|jr�t�|j�}|j	r�|j
r�|j	s�|j
s�td��|j|_nd|_t|jr�|j�
�ng�|_t|j�|_|jr�|jnd|_dS)Nz+Invalid requirement, parse error at "{0!r}"�zInvalid URL given)�REQUIREMENTZparseStringrr�formatZlocr$r%�urlparseZschemeZnetloc�setr&ZasListrr/r0)�selfZrequirement_stringZreq�eZ
parsed_urlrrr�__init__Xs,����
zRequirement.__init__cCsz|jg}|jr*|�d�d�t|j����|jr@|�t|j��|jrX|�d�|j��|j	rp|�d�|j	��d�|�S)Nz[{0}]r!z@ {0}z; {0}r()
r$r&�appendr4�join�sortedr/�strr%r0)r7�partsrrr�__str__mszRequirement.__str__cCsd�t|��S)Nz<Requirement({0!r})>)r4r=)r7rrr�__repr__~szRequirement.__repr__N)rrrrr9r?r@rrrrr1Ksr1)DZ
__future__rrr�string�reZpkg_resources.extern.pyparsingrrrrr	r
rrr
r�LZ%pkg_resources.extern.six.moves.urllibrr5ZmarkersrrZ
specifiersrrr�
ValueErrorrZ
ascii_lettersZdigitsZALPHANUM�suppressZLBRACKETZRBRACKETZLPARENZRPAREN�COMMAZ	SEMICOLON�ATZPUNCTUATIONZIDENTIFIER_ENDZ
IDENTIFIER�NAMEZEXTRAZURIZURLZEXTRAS_LISTZEXTRASZ
_regex_str�VERBOSE�
IGNORECASEZVERSION_PEP440ZVERSION_LEGACYZVERSION_ONEZVERSION_MANYZ
_VERSION_SPECZsetParseActionZVERSION_SPECZMARKER_SEPERATORZMARKERZVERSION_AND_MARKERZURL_AND_MARKERZNAMED_REQUIREMENTr3�objectr1rrrr�<module>sf����PK�V[���K��2_vendor/packaging/__pycache__/utils.cpython-38.pycnu�[���U

�Qab��@s2ddlmZmZmZddlZe�d�Zdd�ZdS)�)�absolute_import�division�print_functionNz[-_.]+cCst�d|���S)N�-)�_canonicalize_regex�sub�lower)�name�r
�I/usr/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/utils.py�canonicalize_namesr)Z
__future__rrr�re�compilerrr
r
r
r�<module>s
PK�V[M?g�)�):_vendor/packaging/__pycache__/version.cpython-38.opt-1.pycnu�[���U

�Qab$-�	@s�ddlmZmZmZddlZddlZddlZddlmZddddd	gZ	e�
d
ddd
dddg�Zdd�ZGdd�de
�ZGdd�de�ZGdd�de�Ze�dej�Zdddddd�Zdd�Zdd�ZdZGd d�de�Zd!d"�Ze�d#�Zd$d%�Zd&d'�ZdS)(�)�absolute_import�division�print_functionN�)�Infinity�parse�Version�
LegacyVersion�InvalidVersion�VERSION_PATTERN�_Version�epoch�release�dev�pre�post�localcCs,z
t|�WStk
r&t|�YSXdS)z�
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on if the given version is
    a valid PEP 440 version or a legacy version.
    N)rr
r	)�version�r�K/usr/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/version.pyrs
c@seZdZdZdS)r
zF
    An invalid version was found, users should refer to PEP 440.
    N)�__name__�
__module__�__qualname__�__doc__rrrrr
$sc@sLeZdZdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
d�Z	dd�Z
dS)�_BaseVersioncCs
t|j�S�N)�hash�_key��selfrrr�__hash__,sz_BaseVersion.__hash__cCs|�|dd��S)NcSs||kSrr��s�orrr�<lambda>0�z%_BaseVersion.__lt__.<locals>.<lambda>��_compare�r�otherrrr�__lt__/sz_BaseVersion.__lt__cCs|�|dd��S)NcSs||kSrrr!rrrr$3r%z%_BaseVersion.__le__.<locals>.<lambda>r&r(rrr�__le__2sz_BaseVersion.__le__cCs|�|dd��S)NcSs||kSrrr!rrrr$6r%z%_BaseVersion.__eq__.<locals>.<lambda>r&r(rrr�__eq__5sz_BaseVersion.__eq__cCs|�|dd��S)NcSs||kSrrr!rrrr$9r%z%_BaseVersion.__ge__.<locals>.<lambda>r&r(rrr�__ge__8sz_BaseVersion.__ge__cCs|�|dd��S)NcSs||kSrrr!rrrr$<r%z%_BaseVersion.__gt__.<locals>.<lambda>r&r(rrr�__gt__;sz_BaseVersion.__gt__cCs|�|dd��S)NcSs||kSrrr!rrrr$?r%z%_BaseVersion.__ne__.<locals>.<lambda>r&r(rrr�__ne__>sz_BaseVersion.__ne__cCst|t�stS||j|j�Sr)�
isinstancer�NotImplementedr)rr)�methodrrrr'As
z_BaseVersion._compareN)rrrr r*r+r,r-r.r/r'rrrrr*src@s`eZdZdd�Zdd�Zdd�Zedd��Zed	d
��Zedd��Z	ed
d��Z
edd��ZdS)r	cCst|�|_t|j�|_dSr)�str�_version�_legacy_cmpkeyr)rrrrr�__init__Js
zLegacyVersion.__init__cCs|jSr�r4rrrr�__str__NszLegacyVersion.__str__cCsd�tt|���S)Nz<LegacyVersion({0})>��format�reprr3rrrr�__repr__QszLegacyVersion.__repr__cCs|jSrr7rrrr�publicTszLegacyVersion.publiccCs|jSrr7rrrr�base_versionXszLegacyVersion.base_versioncCsdSrrrrrrr\szLegacyVersion.localcCsdS�NFrrrrr�
is_prerelease`szLegacyVersion.is_prereleasecCsdSr?rrrrr�is_postreleasedszLegacyVersion.is_postreleaseN)rrrr6r8r<�propertyr=r>rr@rArrrrr	Hs



z(\d+ | [a-z]+ | \.| -)�czfinal-�@)r�preview�-�rcrccs\t�|�D]F}t�||�}|r
|dkr(q
|dd�dkrF|�d�Vq
d|Vq
dVdS)N�.r�
0123456789��*�*final)�_legacy_version_component_re�split�_legacy_version_replacement_map�get�zfill)r"�partrrr�_parse_version_partsrsrScCszd}g}t|���D]T}|�d�r^|dkrD|rD|ddkrD|��q*|r^|ddkr^|��qD|�|�qt|�}||fS)N���rKrLz*final-Z00000000)rS�lower�
startswith�pop�append�tuple)rr
�partsrRrrrr5�s


r5a�
    v?
    (?:
        (?:(?P<epoch>[0-9]+)!)?                           # epoch
        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P<pre>                                          # pre-release
            [-_\.]?
            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
            [-_\.]?
            (?P<pre_n>[0-9]+)?
        )?
        (?P<post>                                         # post release
            (?:-(?P<post_n1>[0-9]+))
            |
            (?:
                [-_\.]?
                (?P<post_l>post|rev|r)
                [-_\.]?
                (?P<post_n2>[0-9]+)?
            )
        )?
        (?P<dev>                                          # dev release
            [-_\.]?
            (?P<dev_l>dev)
            [-_\.]?
            (?P<dev_n>[0-9]+)?
        )?
    )
    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
c@s|eZdZe�dedejejB�Zdd�Z	dd�Z
dd�Zed	d
��Z
edd��Zed
d��Zedd��Zedd��ZdS)rz^\s*z\s*$c
Cs�|j�|�}|std�|���t|�d�r8t|�d��ndtdd�|�d��d�D��t	|�d�|�d	��t	|�d
�|�d�p�|�d��t	|�d
�|�d��t
|�d��d�|_t|jj
|jj|jj|jj|jj|jj�|_dS)NzInvalid version: '{0}'r
rcss|]}t|�VqdSr)�int��.0�irrr�	<genexpr>�sz#Version.__init__.<locals>.<genexpr>rrHZpre_lZpre_nZpost_lZpost_n1Zpost_n2Zdev_lZdev_nr�r
rrrrr)�_regex�searchr
r:r�groupr[rYrN�_parse_letter_version�_parse_local_versionr4�_cmpkeyr
rrrrrr)rr�matchrrrr6�s8�����zVersion.__init__cCsd�tt|���S)Nz<Version({0})>r9rrrrr<�szVersion.__repr__cCs�g}|jjdkr$|�d�|jj��|�d�dd�|jjD���|jjdk	rl|�d�dd�|jjD���|jjdk	r�|�d�|jjd	��|jjdk	r�|�d
�|jjd	��|jj	dk	r�|�d�d�dd�|jj	D����d�|�S)
Nr�{0}!rHcss|]}t|�VqdSr�r3�r]�xrrrr_�sz"Version.__str__.<locals>.<genexpr>�css|]}t|�VqdSrrirjrrrr_�sz.post{0}rz.dev{0}z+{0}css|]}t|�VqdSrrirjrrrr_s)
r4r
rXr:�joinrrrrr�rrZrrrr8�s�zVersion.__str__cCst|��dd�dS)N�+rr�r3rNrrrrr=
szVersion.publiccCsLg}|jjdkr$|�d�|jj��|�d�dd�|jjD���d�|�S)NrrhrHcss|]}t|�VqdSrrirjrrrr_sz'Version.base_version.<locals>.<genexpr>rl)r4r
rXr:rmrrnrrrr>s
zVersion.base_versioncCs$t|�}d|kr |�dd�dSdS)Nrorrp)rZversion_stringrrrrsz
Version.localcCst|jjp|jj�Sr)�boolr4rrrrrrr@!szVersion.is_prereleasecCst|jj�Sr)rqr4rrrrrrA%szVersion.is_postreleaseN)rrr�re�compiler�VERBOSE�
IGNORECASErar6r<r8rBr=r>rr@rArrrrr�s"

�#



cCsv|rZ|dkrd}|��}|dkr&d}n(|dkr4d}n|dkrBd}n|dkrNd	}|t|�fS|sr|rrd	}|t|�fSdS)
NrZalpha�aZbeta�b)rCrrErG)Zrev�rr)rUr[)ZletterZnumberrrrrd*s rdz[\._-]cCs$|dk	r tdd�t�|�D��SdS)zR
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    Ncss&|]}|��s|��nt|�VqdSr)�isdigitrUr[)r]rRrrrr_Qs�z'_parse_local_version.<locals>.<genexpr>)rY�_local_version_seperatorsrN)rrrrreLs�recCs�tttt�dd�t|�����}|dkr@|dkr@|dk	r@t}n|dkrLt}|dkrZt}|dkrft}|dkrvt}ntdd�|D��}||||||fS)NcSs|dkS)Nrr)rkrrrr$`r%z_cmpkey.<locals>.<lambda>css*|]"}t|t�r|dfnt|fVqdS)rlN)r0r[rr\rrrr_�s�z_cmpkey.<locals>.<genexpr>)rY�reversed�list�	itertools�	dropwhilerr`rrrrfWs,���
	�rf)Z
__future__rrr�collectionsr}rrZ_structuresr�__all__�
namedtuplerr�
ValueErrorr
�objectrr	rsrtrMrOrSr5rrrdrzrerfrrrr�<module>sH��!�� k
PK�V[�r"r":_vendor/packaging/__pycache__/markers.cpython-38.opt-1.pycnu�[���U

�Qab8 �	@s@ddlmZmZmZddlZddlZddlZddlZddlm	Z	m
Z
mZmZddlm
Z
mZmZmZddlmZddlmZddlmZmZd	d
ddd
gZGdd	�d	e�ZGdd
�d
e�ZGdd�de�ZGdd�de�ZGdd�de�ZGdd�de�Z Gdd�de�Z!ed�ed�Bed�Bed�Bed�Bed�Bed�Bed �Bed!�Bed"�Bed#�Bed$�Bed%�Bed&�Bed'�Bed(�Bed)�Bed*�BZ"d#d"ddddd+�Z#e"�$d,d-��ed.�ed/�Bed0�Bed1�Bed2�Bed3�Bed4�Bed5�BZ%e%ed6�Bed7�BZ&e&�$d8d-��ed9�ed:�BZ'e'�$d;d-��ed<�ed=�BZ(e"e'BZ)ee)e&e)�Z*e*�$d>d-��ed?��+�Z,ed@��+�Z-e�Z.e*ee,e.e-�BZ/e.e/e
e(e.�>ee.eZ0dAdB�Z1dSdDdE�Z2dFd-�dGd-�ej3ej4ej5ej6ej7ej8dH�Z9dIdJ�Z:e�Z;dKdL�Z<dMdN�Z=dOdP�Z>dQd
�Z?GdRd�de�Z@dS)T�)�absolute_import�division�print_functionN)�ParseException�ParseResults�stringStart�	stringEnd)�
ZeroOrMore�Group�Forward�QuotedString)�Literal�)�string_types)�	Specifier�InvalidSpecifier�
InvalidMarker�UndefinedComparison�UndefinedEnvironmentName�Marker�default_environmentc@seZdZdZdS)rzE
    An invalid marker was found, users should refer to PEP 508.
    N��__name__�
__module__�__qualname__�__doc__�rr�K/usr/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/markers.pyrsc@seZdZdZdS)rzP
    An invalid operation was attempted on a value that doesn't support it.
    Nrrrrrrsc@seZdZdZdS)rz\
    A name was attempted to be used that does not exist inside of the
    environment.
    Nrrrrrr%sc@s,eZdZdd�Zdd�Zdd�Zdd�Zd	S)
�NodecCs
||_dS�N)�value)�selfr rrr�__init__.sz
Node.__init__cCs
t|j�Sr)�strr �r!rrr�__str__1szNode.__str__cCsd�|jjt|��S)Nz<{0}({1!r})>)�format�	__class__rr#r$rrr�__repr__4sz
Node.__repr__cCst�dSr)�NotImplementedErrorr$rrr�	serialize7szNode.serializeN)rrrr"r%r(r*rrrrr,src@seZdZdd�ZdS)�VariablecCst|�Sr�r#r$rrrr*=szVariable.serializeN�rrrr*rrrrr+;sr+c@seZdZdd�ZdS)�ValuecCs
d�|�S)Nz"{0}")r&r$rrrr*CszValue.serializeNr-rrrrr.Asr.c@seZdZdd�ZdS)�OpcCst|�Srr,r$rrrr*IszOp.serializeNr-rrrrr/Gsr/�implementation_version�platform_python_implementation�implementation_name�python_full_version�platform_release�platform_version�platform_machine�platform_system�python_version�sys_platform�os_name�os.name�sys.platform�platform.version�platform.machine�platform.python_implementation�python_implementationZextra)r;r<r=r>r?r@cCstt�|d|d��S�Nr)r+�ALIASES�get��s�l�trrr�<lambda>i�rHz===�==�>=�<=�!=z~=�>�<�not in�incCst|d�SrA)r/rDrrrrHwrI�'�"cCst|d�SrA)r.rDrrrrHzrI�and�orcCst|d�SrA)�tuplerDrrrrH�rI�(�)cCs t|t�rdd�|D�S|SdS)NcSsg|]}t|��qSr)�_coerce_parse_result)�.0�irrr�
<listcomp>�sz(_coerce_parse_result.<locals>.<listcomp>)�
isinstancer)�resultsrrrrY�s
rYTcCs�t|t�r4t|�dkr4t|dttf�r4t|d�St|t�rndd�|D�}|rZd�|�Sdd�|�dSn"t|t�r�d�dd	�|D��S|SdS)
Nrrcss|]}t|dd�VqdS)F)�firstN)�_format_marker�rZ�mrrr�	<genexpr>�sz!_format_marker.<locals>.<genexpr>� rWrXcSsg|]}|���qSr)r*rarrrr\�sz"_format_marker.<locals>.<listcomp>)r]�list�lenrVr`�join)�markerr_�innerrrrr`�s�


r`cCs||kSrr��lhs�rhsrrrrH�rIcCs||kSrrrjrrrrH�rI)rQrPrOrLrJrMrKrNcCslztd�|��|g��}Wntk
r.YnX|�|�St�|���}|dkrbtd�|||���|||�S)N�z#Undefined {0!r} on {1!r} and {2!r}.)	rrgr*r�contains�
_operatorsrCrr&)rk�oprl�specZoperrrr�_eval_op�s
�rrcCs&|�|t�}|tkr"td�|���|S)Nz/{0!r} does not exist in evaluation environment.)rC�
_undefinedrr&)�environment�namer rrr�_get_env�s�rvc	Cs�gg}|D]�}t|t�r.|d�t||��q
t|t�r�|\}}}t|t�r`t||j�}|j}n|j}t||j�}|d�t|||��q
|dkr
|�g�q
t	dd�|D��S)N���rUcss|]}t|�VqdSr)�all)rZ�itemrrrrc�sz$_evaluate_markers.<locals>.<genexpr>)
r]re�append�_evaluate_markersrVr+rvr rr�any)	Zmarkersrt�groupsrhrkrprlZ	lhs_valueZ	rhs_valuerrrr{�s



r{cCs2d�|�}|j}|dkr.||dt|j�7}|S)Nz{0.major}.{0.minor}.{0.micro}�finalr)r&�releaselevelr#�serial)�info�versionZkindrrr�format_full_version�s

r�cCslttd�r ttjj�}tjj}nd}d}||tjt��t�	�t�
�t��t��t��t��dd�tjd�S)N�implementation�0rm�)r2r0r:r6r4r7r5r3r1r8r9)
�hasattr�sysr�r�r�ru�os�platform�machine�release�systemr8r@)Ziverr2rrrr�s"

�c@s.eZdZdd�Zdd�Zdd�Zd
dd	�ZdS)rc
Cs`ztt�|��|_WnFtk
rZ}z(d�|||j|jd��}t|��W5d}~XYnXdS)Nz+Invalid marker: {0!r}, parse error at {1!r}�)rY�MARKERZparseString�_markersrr&Zlocr)r!rh�eZerr_strrrrr"s�zMarker.__init__cCs
t|j�Sr)r`r�r$rrrr%szMarker.__str__cCsd�t|��S)Nz<Marker({0!r})>)r&r#r$rrrr(szMarker.__repr__NcCs$t�}|dk	r|�|�t|j|�S)a$Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        N)r�updater{r�)r!rtZcurrent_environmentrrr�evaluate s	
zMarker.evaluate)N)rrrr"r%r(r�rrrrrs)T)AZ
__future__rrr�operatorr�r�r�Zpkg_resources.extern.pyparsingrrrrr	r
rrr
�LZ_compatrZ
specifiersrr�__all__�
ValueErrorrrr�objectrr+r.r/ZVARIABLErBZsetParseActionZVERSION_CMPZ	MARKER_OPZMARKER_VALUEZBOOLOPZ
MARKER_VARZMARKER_ITEM�suppressZLPARENZRPARENZMARKER_EXPRZMARKER_ATOMr�rYr`�lt�le�eq�ne�ge�gtrorrrsrvr{r�rrrrrr�<module>s����������	�
���
���������������
�PK�V[�S�!!5_vendor/packaging/__pycache__/__init__.cpython-38.pycnu�[���U

�Qab�@sTddlmZmZmZddlmZmZmZmZm	Z	m
Z
mZmZdddddd	d
dgZ
dS)
�)�absolute_import�division�print_function�)�
__author__�
__copyright__�	__email__�__license__�__summary__�	__title__�__uri__�__version__rr
rr
rrr	rN)Z
__future__rrr�	__about__rrrr	r
rrr
�__all__�rr�L/usr/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__init__.py�<module>s(�PK�V[��'��_vendor/packaging/utils.pynu�[���# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import re


_canonicalize_regex = re.compile(r"[-_.]+")


def canonicalize_name(name):
    # This is taken from PEP 503.
    return _canonicalize_regex.sub("-", name).lower()
PK�V[0��8 8 _vendor/packaging/markers.pynu�[���# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import operator
import os
import platform
import sys

from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from pkg_resources.extern.pyparsing import Literal as L  # noqa

from ._compat import string_types
from .specifiers import Specifier, InvalidSpecifier


__all__ = [
    "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
    "Marker", "default_environment",
]


class InvalidMarker(ValueError):
    """
    An invalid marker was found; users should refer to PEP 508.
    """


class UndefinedComparison(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it.
    """


class UndefinedEnvironmentName(ValueError):
    """
    A name was referenced that does not exist inside of the
    environment.
    """


class Node(object):

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return "<{0}({1!r})>".format(self.__class__.__name__, str(self))

    def serialize(self):
        raise NotImplementedError


class Variable(Node):

    def serialize(self):
        return str(self)


class Value(Node):

    def serialize(self):
        return '"{0}"'.format(self)


class Op(Node):

    def serialize(self):
        return str(self)


VARIABLE = (
    L("implementation_version") |
    L("platform_python_implementation") |
    L("implementation_name") |
    L("python_full_version") |
    L("platform_release") |
    L("platform_version") |
    L("platform_machine") |
    L("platform_system") |
    L("python_version") |
    L("sys_platform") |
    L("os_name") |
    L("os.name") |  # PEP-345
    L("sys.platform") |  # PEP-345
    L("platform.version") |  # PEP-345
    L("platform.machine") |  # PEP-345
    L("platform.python_implementation") |  # PEP-345
    L("python_implementation") |  # undocumented setuptools legacy
    L("extra")
)
ALIASES = {
    'os.name': 'os_name',
    'sys.platform': 'sys_platform',
    'platform.version': 'platform_version',
    'platform.machine': 'platform_machine',
    'platform.python_implementation': 'platform_python_implementation',
    'python_implementation': 'platform_python_implementation'
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))

VERSION_CMP = (
    L("===") |
    L("==") |
    L(">=") |
    L("<=") |
    L("!=") |
    L("~=") |
    L(">") |
    L("<")
)

MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))

MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))

BOOLOP = L("and") | L("or")

MARKER_VAR = VARIABLE | MARKER_VALUE

MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))

LPAREN = L("(").suppress()
RPAREN = L(")").suppress()

MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)

MARKER = stringStart + MARKER_EXPR + stringEnd


def _coerce_parse_result(results):
    if isinstance(results, ParseResults):
        return [_coerce_parse_result(i) for i in results]
    else:
        return results


def _format_marker(marker, first=True):
    assert isinstance(marker, (list, tuple, string_types))

    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself its own list. In that case we want to skip
    # the rest of this function so that we don't get extraneous () on the
    # outside.
    if (isinstance(marker, list) and len(marker) == 1 and
            isinstance(marker[0], (list, tuple))):
        return _format_marker(marker[0])

    if isinstance(marker, list):
        inner = (_format_marker(m, first=False) for m in marker)
        if first:
            return " ".join(inner)
        else:
            return "(" + " ".join(inner) + ")"
    elif isinstance(marker, tuple):
        return " ".join([m.serialize() for m in marker])
    else:
        return marker


_operators = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _eval_op(lhs, op, rhs):
    try:
        spec = Specifier("".join([op.serialize(), rhs]))
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)

    oper = _operators.get(op.serialize())
    if oper is None:
        raise UndefinedComparison(
            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
        )

    return oper(lhs, rhs)
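# Editor's illustration of the fallback above (comment only, not upstream
# code): the right-hand side is first tried as a PEP 440 specifier and only
# falls back to a plain comparison when that fails, e.g.
#
#   _eval_op("3.8", Op(">="), "2.7")      # Specifier(">=2.7").contains("3.8") -> True
#   _eval_op("posix", Op("=="), "posix")  # no valid specifier -> operator.eq -> True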


_undefined = object()


def _get_env(environment, name):
    value = environment.get(name, _undefined)

    if value is _undefined:
        raise UndefinedEnvironmentName(
            "{0!r} does not exist in evaluation environment.".format(name)
        )

    return value


def _evaluate_markers(markers, environment):
    groups = [[]]

    for marker in markers:
        assert isinstance(marker, (list, tuple, string_types))

        if isinstance(marker, list):
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker

            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)

            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
            if marker == "or":
                groups.append([])

    return any(all(item) for item in groups)
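# Editor's note (comment only): groups are split on "or" and the items within
# a group are "and"-ed together, so a parsed marker like [A, "and", B, "or", C]
# yields the groups [[A, B], [C]] and evaluates as (A and B) or C.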


def format_full_version(info):
    version = '{0.major}.{0.minor}.{0.micro}'.format(info)
    kind = info.releaselevel
    if kind != 'final':
        version += kind[0] + str(info.serial)
    return version


def default_environment():
    if hasattr(sys, 'implementation'):
        iver = format_full_version(sys.implementation.version)
        implementation_name = sys.implementation.name
    else:
        iver = '0'
        implementation_name = ''

    return {
        "implementation_name": implementation_name,
        "implementation_version": iver,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        "python_version": platform.python_version()[:3],
        "sys_platform": sys.platform,
    }


class Marker(object):

    def __init__(self, marker):
        try:
            self._markers = _coerce_parse_result(MARKER.parseString(marker))
        except ParseException as e:
            err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
                marker, marker[e.loc:e.loc + 8])
            raise InvalidMarker(err_str)

    def __str__(self):
        return _format_marker(self._markers)

    def __repr__(self):
        return "<Marker({0!r})>".format(str(self))

    def evaluate(self, environment=None):
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        current_environment = default_environment()
        if environment is not None:
            current_environment.update(environment)

        return _evaluate_markers(self._markers, current_environment)
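

# Editor's note: an interactive usage sketch, not part of the vendored module.
# It is kept in comments because this file uses relative imports and is only
# importable as part of the pkg_resources._vendor.packaging package.
#
#   >>> m = Marker("python_version >= '2.7' and os_name == 'posix'")
#   >>> m.evaluate({"python_version": "3.8", "os_name": "posix"})
#   True
#   >>> m.evaluate({"python_version": "2.6", "os_name": "posix"})
#   False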
PK�V[�ơ$-$-_vendor/packaging/version.pynu�[���# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import collections
import itertools
import re

from ._structures import Infinity


__all__ = [
    "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]


_Version = collections.namedtuple(
    "_Version",
    ["epoch", "release", "dev", "pre", "post", "local"],
)


def parse(version):
    """
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on whether the given version is
    a valid PEP 440 version or a legacy version.
    """
    try:
        return Version(version)
    except InvalidVersion:
        return LegacyVersion(version)
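# Editor's sketch of the dispatch above (comment only, not upstream code):
#
#   >>> parse("1.0.post1")
#   <Version('1.0.post1')>
#   >>> parse("not.a.pep440.version!")
#   <LegacyVersion('not.a.pep440.version!')>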


class InvalidVersion(ValueError):
    """
    An invalid version was found; users should refer to PEP 440.
    """


class _BaseVersion(object):

    def __hash__(self):
        return hash(self._key)

    def __lt__(self, other):
        return self._compare(other, lambda s, o: s < o)

    def __le__(self, other):
        return self._compare(other, lambda s, o: s <= o)

    def __eq__(self, other):
        return self._compare(other, lambda s, o: s == o)

    def __ge__(self, other):
        return self._compare(other, lambda s, o: s >= o)

    def __gt__(self, other):
        return self._compare(other, lambda s, o: s > o)

    def __ne__(self, other):
        return self._compare(other, lambda s, o: s != o)

    def _compare(self, other, method):
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return method(self._key, other._key)


class LegacyVersion(_BaseVersion):

    def __init__(self, version):
        self._version = str(version)
        self._key = _legacy_cmpkey(self._version)

    def __str__(self):
        return self._version

    def __repr__(self):
        return "<LegacyVersion({0})>".format(repr(str(self)))

    @property
    def public(self):
        return self._version

    @property
    def base_version(self):
        return self._version

    @property
    def local(self):
        return None

    @property
    def is_prerelease(self):
        return False

    @property
    def is_postrelease(self):
        return False


_legacy_version_component_re = re.compile(
    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)

_legacy_version_replacement_map = {
    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}


def _parse_version_parts(s):
    for part in _legacy_version_component_re.split(s):
        part = _legacy_version_replacement_map.get(part, part)

        if not part or part == ".":
            continue

        if part[:1] in "0123456789":
            # pad for numeric comparison
            yield part.zfill(8)
        else:
            yield "*" + part

    # ensure that alpha/beta/candidate are before final
    yield "*final"


def _legacy_cmpkey(version):
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
    # greater than or equal to 0. This will effectively put the LegacyVersion,
    # which uses the defacto standard originally implemented by setuptools,
    # as before all PEP 440 versions.
    epoch = -1

    # This scheme is taken from setuptools' pkg_resources.parse_version, prior
    # to its adoption of the packaging library.
    parts = []
    for part in _parse_version_parts(version.lower()):
        if part.startswith("*"):
            # remove "-" before a prerelease tag
            if part < "*final":
                while parts and parts[-1] == "*final-":
                    parts.pop()

            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1] == "00000000":
                parts.pop()

        parts.append(part)
    parts = tuple(parts)

    return epoch, parts

# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
    v?
    (?:
        (?:(?P<epoch>[0-9]+)!)?                           # epoch
        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P<pre>                                          # pre-release
            [-_\.]?
            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
            [-_\.]?
            (?P<pre_n>[0-9]+)?
        )?
        (?P<post>                                         # post release
            (?:-(?P<post_n1>[0-9]+))
            |
            (?:
                [-_\.]?
                (?P<post_l>post|rev|r)
                [-_\.]?
                (?P<post_n2>[0-9]+)?
            )
        )?
        (?P<dev>                                          # dev release
            [-_\.]?
            (?P<dev_l>dev)
            [-_\.]?
            (?P<dev_n>[0-9]+)?
        )?
    )
    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
"""


class Version(_BaseVersion):

    _regex = re.compile(
        r"^\s*" + VERSION_PATTERN + r"\s*$",
        re.VERBOSE | re.IGNORECASE,
    )

    def __init__(self, version):
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion("Invalid version: '{0}'".format(version))

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(
                match.group("pre_l"),
                match.group("pre_n"),
            ),
            post=_parse_letter_version(
                match.group("post_l"),
                match.group("post_n1") or match.group("post_n2"),
            ),
            dev=_parse_letter_version(
                match.group("dev_l"),
                match.group("dev_n"),
            ),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        return "<Version({0})>".format(repr(str(self)))

    def __str__(self):
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        # Pre-release
        if self._version.pre is not None:
            parts.append("".join(str(x) for x in self._version.pre))

        # Post-release
        if self._version.post is not None:
            parts.append(".post{0}".format(self._version.post[1]))

        # Development release
        if self._version.dev is not None:
            parts.append(".dev{0}".format(self._version.dev[1]))

        # Local version segment
        if self._version.local is not None:
            parts.append(
                "+{0}".format(".".join(str(x) for x in self._version.local))
            )

        return "".join(parts)

    @property
    def public(self):
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        return "".join(parts)

    @property
    def local(self):
        version_string = str(self)
        if "+" in version_string:
            return version_string.split("+", 1)[1]

    @property
    def is_prerelease(self):
        return bool(self._version.dev or self._version.pre)

    @property
    def is_postrelease(self):
        return bool(self._version.post)


def _parse_letter_version(letter, number):
    if letter:
        # We consider there to be an implicit 0 in a pre-release if there is
        # not a numeral associated with it.
        if number is None:
            number = 0

        # We normalize any letters to their lower case form
        letter = letter.lower()

        # We consider some words to be alternate spellings of other words and
        # in those cases we want to normalize the spellings to our preferred
        # spelling.
        if letter == "alpha":
            letter = "a"
        elif letter == "beta":
            letter = "b"
        elif letter in ["c", "pre", "preview"]:
            letter = "rc"
        elif letter in ["rev", "r"]:
            letter = "post"

        return letter, int(number)
    if not letter and number:
        # We assume if we are given a number, but we are not given a letter
        # then this is using the implicit post release syntax (e.g. 1.0-1)
        letter = "post"

        return letter, int(number)


_local_version_seperators = re.compile(r"[\._-]")


def _parse_local_version(local):
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is not None:
        return tuple(
            part.lower() if not part.isdigit() else int(part)
            for part in _local_version_seperators.split(local)
        )


def _cmpkey(epoch, release, pre, post, dev, local):
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
    # zeros until we come to something non-zero, then take the rest, re-reverse
    # it back into the correct order, and make it a tuple to use as our sorting
    # key.
    release = tuple(
        reversed(list(
            itertools.dropwhile(
                lambda x: x == 0,
                reversed(release),
            )
        ))
    )

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre segment, but we _only_ want to do this
    # if there is not a pre or a post segment. If we have one of those then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        pre = -Infinity
    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        pre = Infinity

    # Versions without a post segment should sort before those with one.
    if post is None:
        post = -Infinity

    # Versions without a development segment should sort after those with one.
    if dev is None:
        dev = Infinity

    if local is None:
        # Versions without a local segment should sort before those with one.
        local = -Infinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alpha numeric segments sort before numeric segments
        # - Alpha numeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        local = tuple(
            (i, "") if isinstance(i, int) else (-Infinity, i)
            for i in local
        )

    return epoch, release, pre, post, dev, local
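

# Editor's note: an ordering sketch kept in comments (this module is imported
# as part of the packaging package, so it is not meant to be run directly).
# The results below follow from the _cmpkey() rules above.
#
#   >>> Version("1.0.dev1") < Version("1.0a1") < Version("1.0") < Version("1.0.post1")
#   True
#   >>> sorted(["1.0", "1.0rc1", "2.0.dev0"], key=parse)
#   ['1.0rc1', '1.0', '2.0.dev0']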
PK�V[���!_vendor/packaging/requirements.pynu�[���# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import string
import re

from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from pkg_resources.extern.pyparsing import Literal as L  # noqa
from pkg_resources.extern.six.moves.urllib import parse as urlparse

from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet


class InvalidRequirement(ValueError):
    """
    An invalid requirement was found; users should refer to PEP 508.
    """


ALPHANUM = Word(string.ascii_letters + string.digits)

LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()

PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))

NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER

URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)

EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")

VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)

VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
                       joinString=",", adjacent=False)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')

VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])

MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPERATOR = SEMICOLON
MARKER = MARKER_SEPERATOR + MARKER_EXPR

VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)

NAMED_REQUIREMENT = \
    NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)

REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd


class Requirement(object):
    """Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """

    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?

    def __init__(self, requirement_string):
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            raise InvalidRequirement(
                "Invalid requirement, parse error at \"{0!r}\"".format(
                    requirement_string[e.loc:e.loc + 8]))

        self.name = req.name
        if req.url:
            parsed_url = urlparse.urlparse(req.url)
            if not (parsed_url.scheme and parsed_url.netloc) or (
                    not parsed_url.scheme and not parsed_url.netloc):
                raise InvalidRequirement("Invalid URL given")
            self.url = req.url
        else:
            self.url = None
        self.extras = set(req.extras.asList() if req.extras else [])
        self.specifier = SpecifierSet(req.specifier)
        self.marker = req.marker if req.marker else None

    def __str__(self):
        parts = [self.name]

        if self.extras:
            parts.append("[{0}]".format(",".join(sorted(self.extras))))

        if self.specifier:
            parts.append(str(self.specifier))

        if self.url:
            parts.append("@ {0}".format(self.url))

        if self.marker:
            parts.append("; {0}".format(self.marker))

        return "".join(parts)

    def __repr__(self):
        return "<Requirement({0!r})>".format(str(self))
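

# Editor's note: an interactive usage sketch, not part of the vendored module
# (kept in comments because the module uses relative imports).
#
#   >>> r = Requirement("requests[security]>=2.8.1,==2.8.*; python_version < '2.8'")
#   >>> r.name, sorted(r.extras)
#   ('requests', ['security'])
#   >>> str(r.specifier)
#   '==2.8.*,>=2.8.1'
#   >>> str(r.marker)
#   'python_version < "2.8"'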
PK�V[|E��ymym_vendor/packaging/specifiers.pynu�[���# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import abc
import functools
import itertools
import re

from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse


class InvalidSpecifier(ValueError):
    """
    An invalid specifier was found; users should refer to PEP 440.
    """


class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):

    @abc.abstractmethod
    def __str__(self):
        """
        Returns the str representation of this Specifier like object. This
        should be representative of the Specifier itself.
        """

    @abc.abstractmethod
    def __hash__(self):
        """
        Returns a hash value for this Specifier like object.
        """

    @abc.abstractmethod
    def __eq__(self, other):
        """
        Returns a boolean representing whether or not the two Specifier like
        objects are equal.
        """

    @abc.abstractmethod
    def __ne__(self, other):
        """
        Returns a boolean representing whether or not the two Specifier like
        objects are not equal.
        """

    @abc.abstractproperty
    def prereleases(self):
        """
        Returns whether or not pre-releases as a whole are allowed by this
        specifier.
        """

    @prereleases.setter
    def prereleases(self, value):
        """
        Sets whether or not pre-releases as a whole are allowed by this
        specifier.
        """

    @abc.abstractmethod
    def contains(self, item, prereleases=None):
        """
        Determines if the given item is contained within this specifier.
        """

    @abc.abstractmethod
    def filter(self, iterable, prereleases=None):
        """
        Takes an iterable of items and filters them so that only items which
        are contained within this specifier are allowed in it.
        """


class _IndividualSpecifier(BaseSpecifier):

    _operators = {}

    def __init__(self, spec="", prereleases=None):
        match = self._regex.search(spec)
        if not match:
            raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))

        self._spec = (
            match.group("operator").strip(),
            match.group("version").strip(),
        )

        # Store whether or not this Specifier should accept prereleases
        self._prereleases = prereleases

    def __repr__(self):
        pre = (
            ", prereleases={0!r}".format(self.prereleases)
            if self._prereleases is not None
            else ""
        )

        return "<{0}({1!r}{2})>".format(
            self.__class__.__name__,
            str(self),
            pre,
        )

    def __str__(self):
        return "{0}{1}".format(*self._spec)

    def __hash__(self):
        return hash(self._spec)

    def __eq__(self, other):
        if isinstance(other, string_types):
            try:
                other = self.__class__(other)
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._spec == other._spec

    def __ne__(self, other):
        if isinstance(other, string_types):
            try:
                other = self.__class__(other)
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._spec != other._spec

    def _get_operator(self, op):
        return getattr(self, "_compare_{0}".format(self._operators[op]))

    def _coerce_version(self, version):
        if not isinstance(version, (LegacyVersion, Version)):
            version = parse(version)
        return version

    @property
    def operator(self):
        return self._spec[0]

    @property
    def version(self):
        return self._spec[1]

    @property
    def prereleases(self):
        return self._prereleases

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value

    def __contains__(self, item):
        return self.contains(item)

    def contains(self, item, prereleases=None):
        # Determine if prereleases are to be allowed or not.
        if prereleases is None:
            prereleases = self.prereleases

        # Normalize item to a Version or LegacyVersion; this allows us to have
        # a shortcut for ``"2.0" in Specifier(">=2")``
        item = self._coerce_version(item)

        # Determine if we should be supporting prereleases in this specifier
        # or not. If we do not support prereleases, then we can short-circuit
        # the logic if this version is a prerelease.
        if item.is_prerelease and not prereleases:
            return False

        # Actually do the comparison to determine if this item is contained
        # within this Specifier or not.
        return self._get_operator(self.operator)(item, self.version)

    def filter(self, iterable, prereleases=None):
        yielded = False
        found_prereleases = []

        kw = {"prereleases": prereleases if prereleases is not None else True}

        # Attempt to iterate over all the values in the iterable and if any of
        # them match, yield them.
        for version in iterable:
            parsed_version = self._coerce_version(version)

            if self.contains(parsed_version, **kw):
                # If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
                # else matches this specifier.
                if (parsed_version.is_prerelease and not
                        (prereleases or self.prereleases)):
                    found_prereleases.append(version)
                # Either this is not a prerelease, or we should have been
                # accepting prereleases from the beginning.
                else:
                    yielded = True
                    yield version

        # Now that we've iterated over everything, determine if we've yielded
        # any values, and if we have not and we have any prereleases stored up
        # then we will go ahead and yield the prereleases.
        if not yielded and found_prereleases:
            for version in found_prereleases:
                yield version


class LegacySpecifier(_IndividualSpecifier):

    _regex_str = (
        r"""
        (?P<operator>(==|!=|<=|>=|<|>))
        \s*
        (?P<version>
            [^,;\s)]* # Since this is a "legacy" specifier, and the version
                      # string can be just about anything, we match everything
                      # except for whitespace, a semi-colon for marker support,
                      # a closing paren since versions can be enclosed in
                      # them, and a comma since it's a version separator.
        )
        """
    )

    _regex = re.compile(
        r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    _operators = {
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
    }

    def _coerce_version(self, version):
        if not isinstance(version, LegacyVersion):
            version = LegacyVersion(str(version))
        return version

    def _compare_equal(self, prospective, spec):
        return prospective == self._coerce_version(spec)

    def _compare_not_equal(self, prospective, spec):
        return prospective != self._coerce_version(spec)

    def _compare_less_than_equal(self, prospective, spec):
        return prospective <= self._coerce_version(spec)

    def _compare_greater_than_equal(self, prospective, spec):
        return prospective >= self._coerce_version(spec)

    def _compare_less_than(self, prospective, spec):
        return prospective < self._coerce_version(spec)

    def _compare_greater_than(self, prospective, spec):
        return prospective > self._coerce_version(spec)


def _require_version_compare(fn):
    @functools.wraps(fn)
    def wrapped(self, prospective, spec):
        if not isinstance(prospective, Version):
            return False
        return fn(self, prospective, spec)
    return wrapped
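

# Editor's note: a usage sketch for the Specifier subclass defined below, kept
# in comments. It exercises the prerelease handling implemented in
# _IndividualSpecifier.contains()/filter() above; the default exclusion of
# prereleases is an observed behaviour of the upstream packaging library.
#
#   >>> spec = Specifier(">=1.0")
#   >>> "1.1" in spec
#   True
#   >>> "2.0a1" in spec                       # prereleases excluded by default
#   False
#   >>> spec.contains("2.0a1", prereleases=True)
#   True
#   >>> list(spec.filter(["0.9", "1.5", "2.0a1"]))
#   ['1.5']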


class Specifier(_IndividualSpecifier):

    _regex_str = (
        r"""
        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
        (?P<version>
            (?:
                # The identity operators allow for an escape hatch that will
                # do an exact string match of the version you wish to install.
                # This will not be parsed by PEP 440 and we cannot determine
                # any semantic meaning from it. This operator is discouraged
                # but included entirely as an escape hatch.
                (?<====)  # Only match for the identity operator
                \s*
                [^\s]*    # We just match everything, except for whitespace
                          # since we are only testing for strict identity.
            )
            |
            (?:
                # The (non)equality operators allow for wild card and local
                # versions to be specified so we have to define these two
                # operators separately to enable that.
                (?<===|!=)            # Only match for equals and not equals

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?

                # You cannot use a wild card and a dev or local version
                # together so group them with a | and make them optional.
                (?:
                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
                    |
                    \.\*  # Wild card syntax of .*
                )?
            )
            |
            (?:
                # The compatible operator requires at least two digits in the
                # release segment.
                (?<=~=)               # Only match for the compatible operator

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
            |
            (?:
                # All other operators only allow a subset of what the
                # (non)equality operators do. Specifically they do not allow
                # local versions to be specified nor do they allow the prefix
                # matching wild cards.
                (?<!==|!=|~=)         # We have special cases for these
                                      # operators so we want to make sure they
                                      # don't match here.

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
        )
        """
    )

    _regex = re.compile(
        r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    _operators = {
        "~=": "compatible",
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
        "===": "arbitrary",
    }

    @_require_version_compare
    def _compare_compatible(self, prospective, spec):
        # Compatible releases have an equivalent combination of >= and ==.
        # That is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
        # implement this in terms of the other specifiers instead of
        # implementing it ourselves. The only thing we need to do is construct
        # the other specifiers.

        # We want everything but the last item in the version, but we want to
        # ignore post and dev releases and we want to treat the pre-release as
        # its own separate segment.
        prefix = ".".join(
            list(
                itertools.takewhile(
                    lambda x: (not x.startswith("post") and not
                               x.startswith("dev")),
                    _version_split(spec),
                )
            )[:-1]
        )

        # Add the prefix notation to the end of our string
        prefix += ".*"

        return (self._get_operator(">=")(prospective, spec) and
                self._get_operator("==")(prospective, prefix))

    @_require_version_compare
    def _compare_equal(self, prospective, spec):
        # We need special logic to handle prefix matching
        if spec.endswith(".*"):
            # In the case of prefix matching we want to ignore local segment.
            prospective = Version(prospective.public)
            # Split the spec out by dots, and pretend that there is an implicit
            # dot in between a release segment and a pre-release segment.
            spec = _version_split(spec[:-2])  # Remove the trailing .*

            # Split the prospective version out by dots, and pretend that there
            # is an implicit dot in between a release segment and a pre-release
            # segment.
            prospective = _version_split(str(prospective))

            # Shorten the prospective version to be the same length as the spec
            # so that we can determine if the specifier is a prefix of the
            # prospective version or not.
            prospective = prospective[:len(spec)]

            # Pad out our two sides with zeros so that they both equal the same
            # length.
            spec, prospective = _pad_version(spec, prospective)
        else:
            # Convert our spec string into a Version
            spec = Version(spec)

            # If the specifier does not have a local segment, then we want to
            # act as if the prospective version also does not have a local
            # segment.
            if not spec.local:
                prospective = Version(prospective.public)

        return prospective == spec

    @_require_version_compare
    def _compare_not_equal(self, prospective, spec):
        return not self._compare_equal(prospective, spec)

    @_require_version_compare
    def _compare_less_than_equal(self, prospective, spec):
        return prospective <= Version(spec)

    @_require_version_compare
    def _compare_greater_than_equal(self, prospective, spec):
        return prospective >= Version(spec)

    @_require_version_compare
    def _compare_less_than(self, prospective, spec):
        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec)

        # Check to see if the prospective version is less than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective < spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes a pre-release version, we do not accept pre-release
        # versions for the version mentioned in the specifier (e.g. <3.1
        # should not match 3.1.dev0, but should match 3.0.dev0).
        if not spec.is_prerelease and prospective.is_prerelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten here, it means that the prospective version is both
        # less than the spec version *and* it's not a pre-release of the same
        # version in the spec.
        return True

    @_require_version_compare
    def _compare_greater_than(self, prospective, spec):
        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec)

        # Check to see if the prospective version is greater than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective > spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes a post-release version, we do not accept
        # post-release versions for the version mentioned in the specifier
        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
        if not spec.is_postrelease and prospective.is_postrelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is technically greater than, to match.
        if prospective.local is not None:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten here, it means that the prospective version is both
        # greater than the spec version *and* it's not a post-release or local
        # version of the same version in the spec.
        return True

    def _compare_arbitrary(self, prospective, spec):
        return str(prospective).lower() == str(spec).lower()

    @property
    def prereleases(self):
        # If there is an explicit prereleases set for this, then we'll just
        # blindly use that.
        if self._prereleases is not None:
            return self._prereleases

        # Look at all of our specifiers and determine if they are inclusive
        # operators, and if they are if they are including an explicit
        # prerelease.
        operator, version = self._spec
        if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*; if it does, we
            # want to remove it before parsing.
            if operator == "==" and version.endswith(".*"):
                version = version[:-2]

            # Parse the version, and if it is a pre-release then this
            # specifier allows pre-releases.
            if parse(version).is_prerelease:
                return True

        return False

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value
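

# An illustrative sketch (the helper name is ours): the compatible-release
# and prefix-matching behaviour implemented by the operators above.
def _example_specifier_operators():
    # ~=2.2 behaves like ">=2.2,==2.*".
    compatible = Specifier("~=2.2")
    assert compatible.contains("2.3")
    assert not compatible.contains("3.0")

    # ==2.2.* does prefix matching and ignores any local version segment.
    prefix = Specifier("==2.2.*")
    assert prefix.contains("2.2.5")
    assert prefix.contains("2.2.1+local.build")
    assert not prefix.contains("2.3.0")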


_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")


def _version_split(version):
    result = []
    for item in version.split("."):
        match = _prefix_regex.search(item)
        if match:
            result.extend(match.groups())
        else:
            result.append(item)
    return result


def _pad_version(left, right):
    left_split, right_split = [], []

    # Get the release segment of our versions
    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))

    # Get the rest of our versions
    left_split.append(left[len(left_split[0]):])
    right_split.append(right[len(right_split[0]):])

    # Insert our padding
    left_split.insert(
        1,
        ["0"] * max(0, len(right_split[0]) - len(left_split[0])),
    )
    right_split.insert(
        1,
        ["0"] * max(0, len(left_split[0]) - len(right_split[0])),
    )

    return (
        list(itertools.chain(*left_split)),
        list(itertools.chain(*right_split)),
    )
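

# A short sketch of what the two helpers above produce (the helper below is
# ours and purely illustrative).
def _example_version_split_and_pad():
    # An implicit dot is inserted between a release and a pre-release part.
    assert _version_split("2.1rc1") == ["2", "1", "rc1"]
    # _pad_version() zero-pads the shorter release segment.
    assert _pad_version(["1", "0"], ["1", "0", "2"]) == (
        ["1", "0", "0"],
        ["1", "0", "2"],
    )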


class SpecifierSet(BaseSpecifier):

    def __init__(self, specifiers="", prereleases=None):
        # Split on , to break each individual specifier into its own item, and
        # strip each item to remove leading/trailing whitespace.
        specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]

        # Parse each individual specifier, attempting first to make it a
        # Specifier and falling back to a LegacySpecifier.
        parsed = set()
        for specifier in specifiers:
            try:
                parsed.add(Specifier(specifier))
            except InvalidSpecifier:
                parsed.add(LegacySpecifier(specifier))

        # Turn our parsed specifiers into a frozen set and save them for later.
        self._specs = frozenset(parsed)

        # Store our prereleases value so we can use it later to determine if
        # we accept prereleases or not.
        self._prereleases = prereleases

    def __repr__(self):
        pre = (
            ", prereleases={0!r}".format(self.prereleases)
            if self._prereleases is not None
            else ""
        )

        return "<SpecifierSet({0!r}{1})>".format(str(self), pre)

    def __str__(self):
        return ",".join(sorted(str(s) for s in self._specs))

    def __hash__(self):
        return hash(self._specs)

    def __and__(self, other):
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        specifier = SpecifierSet()
        specifier._specs = frozenset(self._specs | other._specs)

        if self._prereleases is None and other._prereleases is not None:
            specifier._prereleases = other._prereleases
        elif self._prereleases is not None and other._prereleases is None:
            specifier._prereleases = self._prereleases
        elif self._prereleases == other._prereleases:
            specifier._prereleases = self._prereleases
        else:
            raise ValueError(
                "Cannot combine SpecifierSets with True and False prerelease "
                "overrides."
            )

        return specifier

    def __eq__(self, other):
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif isinstance(other, _IndividualSpecifier):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs == other._specs

    def __ne__(self, other):
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif isinstance(other, _IndividualSpecifier):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs != other._specs

    def __len__(self):
        return len(self._specs)

    def __iter__(self):
        return iter(self._specs)

    @property
    def prereleases(self):
        # If we have been given an explicit prerelease modifier, then we'll
        # pass that through here.
        if self._prereleases is not None:
            return self._prereleases

        # If we don't have any specifiers, and we don't have a forced value,
        # then we'll just return None since we don't know if this should have
        # pre-releases or not.
        if not self._specs:
            return None

        # Otherwise we'll see if any of the given specifiers accept
        # prereleases, if any of them do we'll return True, otherwise False.
        return any(s.prereleases for s in self._specs)

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value

    def __contains__(self, item):
        return self.contains(item)

    def contains(self, item, prereleases=None):
        # Ensure that our item is a Version or LegacyVersion instance.
        if not isinstance(item, (LegacyVersion, Version)):
            item = parse(item)

        # Determine if we're forcing a prerelease or not; if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # We can determine if we're going to allow pre-releases by looking to
        # see if any of the underlying items supports them. If none of them do
        # and this item is a pre-release then we do not allow it and we can
        # short circuit that here.
        # Note: This means that 1.0.dev1 would not be contained in something
        #       like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
        if not prereleases and item.is_prerelease:
            return False

        # We simply dispatch to the underlying specs here to make sure that the
        # given version is contained within all of them.
        # Note: This use of all() here means that an empty set of specifiers
        #       will always return True, this is an explicit design decision.
        return all(
            s.contains(item, prereleases=prereleases)
            for s in self._specs
        )

    def filter(self, iterable, prereleases=None):
        # Determine if we're forcing a prerelease or not; if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # If we have any specifiers, then we want to wrap our iterable in the
        # filter method for each one, this will act as a logical AND amongst
        # each specifier.
        if self._specs:
            for spec in self._specs:
                iterable = spec.filter(iterable, prereleases=bool(prereleases))
            return iterable
        # If we do not have any specifiers, then we need to have a rough filter
        # which will filter out any pre-releases, unless there are no final
        # releases, and which will filter out LegacyVersion in general.
        else:
            filtered = []
            found_prereleases = []

            for item in iterable:
                # Ensure that we have some kind of Version class for this item.
                if not isinstance(item, (LegacyVersion, Version)):
                    parsed_version = parse(item)
                else:
                    parsed_version = item

                # Filter out any item which is parsed as a LegacyVersion
                if isinstance(parsed_version, LegacyVersion):
                    continue

                # Store any item which is a pre-release for later unless we've
                # already found a final version or we are accepting prereleases
                if parsed_version.is_prerelease and not prereleases:
                    if not filtered:
                        found_prereleases.append(item)
                else:
                    filtered.append(item)

            # If we've found no items except for pre-releases, then we'll go
            # ahead and use the pre-releases
            if not filtered and found_prereleases and prereleases is None:
                return found_prereleases

            return filtered
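

# A minimal usage sketch (illustrative only; the helper name is ours):
# SpecifierSet splits on commas and applies every specifier as a logical AND.
def _example_specifier_set():
    spec_set = SpecifierSet(">=1.0,!=1.3,<2.0")
    assert "1.5" in spec_set
    assert not spec_set.contains("1.3")
    assert list(spec_set.filter(["0.9", "1.2", "1.3", "1.9", "2.1"])) == [
        "1.2",
        "1.9",
    ]
    # Two sets can be intersected with "&".
    assert str(SpecifierSet(">=1.0") & SpecifierSet("<2.0")) == "<2.0,>=1.0"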
PK�V[<)X���_vendor/packaging/__about__.pynu�[���# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

__all__ = [
    "__title__", "__summary__", "__uri__", "__version__", "__author__",
    "__email__", "__license__", "__copyright__",
]

__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"

__version__ = "16.8"

__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"

__license__ = "BSD or Apache License, Version 2.0"
__copyright__ = "Copyright 2014-2016 %s" % __author__
PK�V[�XMZ�u�u_vendor/six.pynu�[���"""Utilities for writing code that runs on Python 2 and 3"""

# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from __future__ import absolute_import

import functools
import itertools
import operator
import sys
import types

__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"


# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)

if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        class X(object):

            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
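

# An illustrative sketch (not part of six): the aliases above allow one
# isinstance() check to cover both Python 2 and Python 3 string types.
def _example_is_text(value):
    # On Python 3 this is simply isinstance(value, (str,)); on Python 2 it
    # also accepts str and unicode via basestring.
    return isinstance(value, string_types)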


def _add_doc(func, doc):
    """Add documentation to a function."""
    func.__doc__ = doc


def _import_module(name):
    """Import module, returning the module after the last dot."""
    __import__(name)
    return sys.modules[name]


class _LazyDescr(object):

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)  # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result


class MovedModule(_LazyDescr):

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        _module = self._resolve()
        value = getattr(_module, attr)
        setattr(self, attr, value)
        return value


class _LazyModule(types.ModuleType):

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs

    # Subclasses should override this
    _moved_attributes = []


class MovedAttribute(_LazyDescr):

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)


class _SixMetaPathImporter(object):

    """
    A meta path importer to import six.moves and its submodules.

    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python 3.
    """

    def __init__(self, six_module_name):
        self.name = six_module_name
        self.known_modules = {}

    def _add_module(self, mod, *fullnames):
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod

    def _get_module(self, fullname):
        return self.known_modules[self.name + "." + fullname]

    def find_module(self, fullname, path=None):
        if fullname in self.known_modules:
            return self
        return None

    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)

    def load_module(self, fullname):
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod

    def is_package(self, fullname):
        """
        Return true, if the named module is a package.

        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")

    def get_code(self, fullname):
        """Return None

        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code

_importer = _SixMetaPathImporter(__name__)


class _MovedItems(_LazyModule):

    """Lazy loading of moved objects"""
    __path__ = []  # mark as package


_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("intern", "__builtin__", "sys"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
    _moved_attributes += [
        MovedModule("winreg", "_winreg"),
    ]

for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr

_MovedItems._moved_attributes = _moved_attributes

moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")


class Module_six_moves_urllib_parse(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_parse"""


_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
    MovedAttribute("splittag", "urllib", "urllib.parse"),
    MovedAttribute("splituser", "urllib", "urllib.parse"),
    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes

_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
                      "moves.urllib_parse", "moves.urllib.parse")


class Module_six_moves_urllib_error(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_error"""


_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes

_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")


class Module_six_moves_urllib_request(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_request"""


_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes

_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")


class Module_six_moves_urllib_response(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_response"""


_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes

_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")


class Module_six_moves_urllib_robotparser(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_robotparser"""


_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes

_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")


class Module_six_moves_urllib(types.ModuleType):

    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")

    def __dir__(self):
        return ['parse', 'error', 'request', 'response', 'robotparser']

_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")


def add_move(move):
    """Add an item to six.moves."""
    setattr(_MovedItems, move.name, move)


def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
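

# A hedged sketch of the two helpers above (the registration shown here is
# hypothetical and is not performed by six itself).
def _example_add_move():
    # MovedModule takes (name, old_python2_module, new_python3_module).
    add_move(MovedModule("mock", "mock", "unittest.mock"))
    try:
        # The attribute resolves lazily on first access.
        return moves.mock
    finally:
        remove_move("mock")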


if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"


try:
    advance_iterator = next
except NameError:
    def advance_iterator(it):
        return it.next()
next = advance_iterator


try:
    callable = callable
except NameError:
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)


if PY3:
    def get_unbound_function(unbound):
        return unbound

    create_bound_method = types.MethodType

    def create_unbound_method(func, cls):
        return func

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    def create_unbound_method(func, cls):
        return types.MethodType(func, None, cls)

    class Iterator(object):

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")


get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)


if PY3:
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def iterlists(d, **kw):
        return iter(d.lists(**kw))

    viewkeys = operator.methodcaller("keys")

    viewvalues = operator.methodcaller("values")

    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return d.iterkeys(**kw)

    def itervalues(d, **kw):
        return d.itervalues(**kw)

    def iteritems(d, **kw):
        return d.iteritems(**kw)

    def iterlists(d, **kw):
        return d.iterlists(**kw)

    viewkeys = operator.methodcaller("viewkeys")

    viewvalues = operator.methodcaller("viewvalues")

    viewitems = operator.methodcaller("viewitems")

_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")


if PY3:
    def b(s):
        return s.encode("latin-1")

    def u(s):
        return s
    unichr = chr
    import struct
    int2byte = struct.Struct(">B").pack
    del struct
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
    _assertCountEqual = "assertCountEqual"
    if sys.version_info[1] <= 1:
        _assertRaisesRegex = "assertRaisesRegexp"
        _assertRegex = "assertRegexpMatches"
    else:
        _assertRaisesRegex = "assertRaisesRegex"
        _assertRegex = "assertRegex"
else:
    def b(s):
        return s
    # Workaround for standalone backslash

    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr

    def byte2int(bs):
        return ord(bs[0])

    def indexbytes(buf, i):
        return ord(buf[i])
    iterbytes = functools.partial(itertools.imap, ord)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
    _assertCountEqual = "assertItemsEqual"
    _assertRaisesRegex = "assertRaisesRegexp"
    _assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")


def assertCountEqual(self, *args, **kwargs):
    return getattr(self, _assertCountEqual)(*args, **kwargs)


def assertRaisesRegex(self, *args, **kwargs):
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)


def assertRegex(self, *args, **kwargs):
    return getattr(self, _assertRegex)(*args, **kwargs)


if PY3:
    exec_ = getattr(moves.builtins, "exec")

    def reraise(tp, value, tb=None):
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")


if sys.version_info[:2] == (3, 2):
    exec_("""def raise_from(value, from_value):
    if from_value is None:
        raise value
    raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
    exec_("""def raise_from(value, from_value):
    raise value from from_value
""")
else:
    def raise_from(value, from_value):
        raise value


print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
if sys.version_info[:2] < (3, 3):
    _print = print_

    def print_(*args, **kwargs):
        fp = kwargs.get("file", sys.stdout)
        flush = kwargs.pop("flush", False)
        _print(*args, **kwargs)
        if flush and fp is not None:
            fp.flush()

_add_doc(reraise, """Reraise an exception.""")

if sys.version_info[0:2] < (3, 4):
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps


def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):

        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})
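

# An illustrative sketch (the names below are ours): with_metaclass() lets a
# class declare its metaclass in a form that parses on Python 2 and 3 alike.
def _example_with_metaclass():
    class Meta(type):
        """A do-nothing metaclass used only for demonstration."""

    class Base(with_metaclass(Meta, object)):
        pass

    # The temporary class is replaced, so Base really is built by Meta.
    return type(Base) is Meta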


def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        orig_vars = cls.__dict__.copy()
        slots = orig_vars.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            for slots_var in slots:
                orig_vars.pop(slots_var)
        orig_vars.pop('__dict__', None)
        orig_vars.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, orig_vars)
    return wrapper
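

# A companion sketch for the decorator above (helper names are ours).
def _example_add_metaclass():
    class Meta(type):
        """A do-nothing metaclass used only for demonstration."""

    @add_metaclass(Meta)
    class Decorated(object):
        pass

    # The decorator rebuilds the class with Meta, preserving its namespace.
    return type(Decorated) is Meta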


def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if PY2:
        if '__str__' not in klass.__dict__:
            raise ValueError("@python_2_unicode_compatible cannot be applied "
                             "to %s because it doesn't define __str__()." %
                             klass.__name__)
        klass.__unicode__ = klass.__str__
        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
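

# A hedged sketch (ours): define __str__ returning text once and let the
# decorator supply the Python 2 __unicode__/__str__ split.
def _example_python_2_unicode_compatible():
    @python_2_unicode_compatible
    class Greeting(object):
        def __str__(self):
            return u"caf\u00e9"

    # On Python 2, str() now returns UTF-8 encoded bytes; on Python 3 the
    # class is returned unchanged and str() returns the text as-is.
    return str(Greeting())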


# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with a different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
PK�V[�h�PP*_vendor/__pycache__/appdirs.cpython-38.pycnu�[���U

�Qabg`�@s�dZdZd�eee��ZddlZddlZejddkZ	e	r>eZ
ej�d�r�ddlZe�
�ddZe�d�rrdZq�e�d	�r�d
Zq�dZnejZd4d
d�Zd5dd�Zd6dd�Zd7dd�Zd8dd�Zd9dd�Zd:dd�ZGdd�de�Zdd�Zd d!�Zd"d#�Zd$d%�Zedk�r�zddlZeZWnne k
�r�zdd&l!m"Z"eZWnBe k
�r�zddl#Z$eZWne k
�r�eZYnXYnXYnXe%d'k�r�d(Z&d)Z'd*Z(e)d+e�e)d,�ee&e'd-d.�Z*e(D]Z+e)d/e+e,e*e+�f��q�e)d0�ee&e'�Z*e(D]Z+e)d/e+e,e*e+�f��q�e)d1�ee&�Z*e(D]Z+e)d/e+e,e*e+�f��q0e)d2�ee&dd3�Z*e(D]Z+e)d/e+e,e*e+�f��qfdS);zyUtilities for determining application-specific dirs.

See <http://github.com/ActiveState/appdirs> for details and usage.
)����.�Nr�javaZWindows�win32ZMac�darwinZlinux2FcCs�tdkr^|dkr|}|rdpd}tj�t|��}|r�|dk	rNtj�|||�}q�tj�||�}nNtdkr�tj�d�}|r�tj�||�}n&t�dtj�d	��}|r�tj�||�}|r�|r�tj�||�}|S)
aJReturn full path to the user-specific data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user data directories are:
        Mac OS X:               ~/Library/Application Support/<AppName>
        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>

    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
    That means, by default "~/.local/share/<AppName>".
    rN�
CSIDL_APPDATA�CSIDL_LOCAL_APPDATAFrz~/Library/Application Support/Z
XDG_DATA_HOMEz~/.local/share��system�os�path�normpath�_get_win_folder�join�
expanduser�getenv)�appname�	appauthor�version�roaming�constr�r�A/usr/lib/python3.8/site-packages/pkg_resources/_vendor/appdirs.py�
user_data_dir,s& rcstdkrR|dkr�}tj�td��}�r�|dk	rBtj�||��}q�tj�|��}n�tdkrztj�d�}�r�tj�|��}ntt�dtj�dd	g��}d
d�|�	tj�D�}�r�|r�tj��|���fdd�|D�}|r�tj�|�}n|d
}|S��r|�rtj�||�}|S)aiReturn full path to the user-shared data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "multipath" is an optional parameter only applicable to *nix
            which indicates that the entire list of data dirs should be
            returned. By default, the first item from XDG_DATA_DIRS is
            returned, or '/usr/local/share/<AppName>',
            if XDG_DATA_DIRS is not set

    Typical site data directories are:
        Mac OS X:   /Library/Application Support/<AppName>
        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.

    For Unix, this is using the $XDG_DATA_DIRS[0] default.

    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
    rN�CSIDL_COMMON_APPDATAFrz/Library/Application SupportZ
XDG_DATA_DIRSz/usr/local/sharez
/usr/sharecSs g|]}tj�|�tj���qSr�r
rr�rstrip�sep��.0�xrrr�
<listcomp>�sz!site_data_dir.<locals>.<listcomp>csg|]}tj�|�g��qSr�r
rrr �rrrr#�sr)
rr
rrrrrr�pathsep�split�rrr�	multipathrZpathlistrr%r�
site_data_dircs6�r*cCsXtdkrt||d|�}n&t�dtj�d��}|r>tj�||�}|rT|rTtj�||�}|S)a�Return full path to the user-specific config dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user config directories are:
        Mac OS X:               same as user_data_dir
        Unix:                   ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
        Win *:                  same as user_data_dir

    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
    �rrNZXDG_CONFIG_HOMEz	~/.config�rrr
rrrr�rrrrrrrr�user_config_dir�sr.cs�tdkr*t�|�}�r�|r�tj�||�}ndt�dd�}dd�|�tj�D�}�rt|rbtj��|���fdd�|D�}|r�tj�|�}n|d}|S)aReturn full path to the user-shared data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "multipath" is an optional parameter only applicable to *nix
            which indicates that the entire list of config dirs should be
            returned. By default, the first item from XDG_CONFIG_DIRS is
            returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set.

    Typical site config directories are:
        Mac OS X:   same as site_data_dir
        Unix:       /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
                    $XDG_CONFIG_DIRS
        Win *:      same as site_data_dir
        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)

    For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False

    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
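    A minimal usage sketch (assuming the standalone appdirs package; placeholder names):

        import appdirs

        # '/etc/xdg/MyApp' by default on Linux
        print(appdirs.site_config_dir("MyApp", "MyCompany"))

        # every entry of $XDG_CONFIG_DIRS, joined with os.pathsep
        print(appdirs.site_config_dir("MyApp", "MyCompany", multipath=True))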
    r+ZXDG_CONFIG_DIRSz/etc/xdgcSs g|]}tj�|�tj���qSrrr rrrr#�sz#site_config_dir.<locals>.<listcomp>csg|]}tj�|�g��qSrr$r r%rrr#�sr)rr*r
rrrr'r&r(rr%r�site_config_dir�s
r/TcCs�tdkrd|dkr|}tj�td��}|r�|dk	rBtj�|||�}ntj�||�}|r�tj�|d�}nNtdkr�tj�d�}|r�tj�||�}n&t�dtj�d	��}|r�tj�||�}|r�|r�tj�||�}|S)
aReturn full path to the user-specific cache dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "opinion" (boolean) can be False to disable the appending of
            "Cache" to the base app data dir for Windows. See
            discussion below.

    Typical user cache directories are:
        Mac OS X:   ~/Library/Caches/<AppName>
        Unix:       ~/.cache/<AppName> (XDG default)
        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache

    On Windows the only suggestion in the MSDN docs is that local settings go in
    the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
    app data dir (the default returned by `user_data_dir` above). Apps typically
    put cache data somewhere *under* the given dir here. Some examples:
        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
        ...\Acme\SuperApp\Cache\1.0
    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
    This can be disabled with the `opinion=False` option.
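    A minimal usage sketch (assuming the standalone appdirs package; placeholder names):

        import appdirs

        print(appdirs.user_cache_dir("MyApp", "MyCompany"))                 # Windows result ends in ...\MyApp\Cache
        print(appdirs.user_cache_dir("MyApp", "MyCompany", opinion=False))  # suppress the trailing "Cache" component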
    rNr
FZCacherz~/Library/CachesZXDG_CACHE_HOMEz~/.cacher�rrrZopinionrrrr�user_cache_dirs(!r1cCsXtdkrt||d|�}n&t�dtj�d��}|r>tj�||�}|rT|rTtj�||�}|S)aReturn full path to the user-specific state dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user state directories are:
        Mac OS X:  same as user_data_dir
        Unix:      ~/.local/state/<AppName>   # or in $XDG_STATE_HOME, if defined
        Win *:     same as user_data_dir

    For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
    to extend the XDG spec and support $XDG_STATE_HOME.

    That means, by default "~/.local/state/<AppName>".
    r+NZXDG_STATE_HOMEz~/.local/stater,r-rrr�user_state_dir9sr2cCs�tdkr tj�tj�d�|�}nNtdkrLt|||�}d}|rntj�|d�}n"t|||�}d}|rntj�|d�}|r�|r�tj�||�}|S)a�Return full path to the user-specific log dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "opinion" (boolean) can be False to disable the appending of
            "Logs" to the base app data dir for Windows, and "log" to the
            base cache dir for Unix. See discussion below.

    Typical user log directories are:
        Mac OS X:   ~/Library/Logs/<AppName>
        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs

    On Windows the only suggestion in the MSDN docs is that local settings
    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
    examples of what some Windows apps use for a logs dir.)

    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
    value for Windows and appends "log" to the user cache dir for Unix.
    This can be disabled with the `opinion=False` option.
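    A minimal usage sketch (assuming the standalone appdirs package; placeholder names):

        import appdirs

        # e.g. '~/.cache/MyApp/1.0/log' (expanded) on Linux
        print(appdirs.user_log_dir("MyApp", "MyCompany", version="1.0"))

        # opinion=False drops the trailing "log"/"Logs" component
        print(appdirs.user_log_dir("MyApp", "MyCompany", opinion=False))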
    rz~/Library/LogsrFZLogs�log)rr
rrrrr1r0rrr�user_log_dircs" 
�r4c@sneZdZdZddd�Zedd��Zedd	��Zed
d��Zedd
��Z	edd��Z
edd��Zedd��ZdS)�AppDirsz1Convenience wrapper for getting application dirs.NFcCs"||_||_||_||_||_dS)N)rrrrr))�selfrrrrr)rrr�__init__�s
zAppDirs.__init__cCst|j|j|j|jd�S�N)rr)rrrrr�r6rrrr�s
�zAppDirs.user_data_dircCst|j|j|j|jd�S�N)rr))r*rrrr)r9rrrr*�s
�zAppDirs.site_data_dircCst|j|j|j|jd�Sr8)r.rrrrr9rrrr.�s
�zAppDirs.user_config_dircCst|j|j|j|jd�Sr:)r/rrrr)r9rrrr/�s
�zAppDirs.site_config_dircCst|j|j|jd�S�N�r)r1rrrr9rrrr1�s
�zAppDirs.user_cache_dircCst|j|j|jd�Sr;)r2rrrr9rrrr2�s
�zAppDirs.user_state_dircCst|j|j|jd�Sr;)r4rrrr9rrrr4�s
�zAppDirs.user_log_dir)NNNFF)
�__name__�
__module__�__qualname__�__doc__r7�propertyrr*r.r/r1r2r4rrrrr5�s&�






r5cCsHtrddl}nddl}dddd�|}|�|jd�}|�||�\}}|S)z�This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
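    A hedged sketch of the registry lookup described above (Windows only; the key path
    and value names match those used by the function body below):

        try:
            import winreg                 # Python 3
        except ImportError:
            import _winreg as winreg      # Python 2

        def shell_folder(value_name):
            # value_name is "AppData", "Common AppData" or "Local AppData"
            key = winreg.OpenKey(
                winreg.HKEY_CURRENT_USER,
                r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
            try:
                directory, _value_type = winreg.QueryValueEx(key, value_name)
            finally:
                winreg.CloseKey(key)
            return directory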
    rNZAppDatazCommon AppDataz
Local AppData�r	rr
z@Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders)�PY3�winreg�_winreg�OpenKey�HKEY_CURRENT_USERZQueryValueEx)�
csidl_namerEZshell_folder_name�key�dir�typerrr�_get_win_folder_from_registry�s
���rLcCs�ddlm}m}|�dt||�dd�}z^t|�}d}|D]}t|�dkr8d}qRq8|r�zddl}|�|�}Wnt	k
r�YnXWnt
k
r�YnX|S)Nr)�shellcon�shellF�T)�win32com.shellrMrN�SHGetFolderPath�getattr�unicode�ord�win32api�GetShortPathName�ImportError�UnicodeError)rHrMrNrJ�
has_high_char�crUrrr�_get_win_folder_with_pywin32�s$
r[cCs�ddl}dddd�|}|�d�}|jj�d|dd|�d}|D]}t|�dkr@d	}qZq@|r�|�d�}|jj�|j|d�r�|}|jS)
Nr��#�rBiFrOT)	�ctypesZcreate_unicode_buffer�windllZshell32ZSHGetFolderPathWrTZkernel32ZGetShortPathNameW�value)rHr_Zcsidl_const�bufrYrZZbuf2rrr�_get_win_folder_with_ctypes�s&��

rccCs�ddl}ddlm}ddlm}|jjd}|�d|�}|jj	}|�
dt|j|�d|jj
|�|j�|����d�}d}|D]}	t|	�dkr|d	}q�q||r�|�d|�}|jj	}
|
�|||�r�|j�|����d�}|S)
Nr)�jna)r�rZ�FrOT)�arrayZcom.sunrdZcom.sun.jna.platformrZWinDefZMAX_PATHZzerosZShell32ZINSTANCErQrRZShlObjZSHGFP_TYPE_CURRENTZNativeZtoStringZtostringrrTZKernel32rV)rHrgrdrZbuf_sizerbrNrJrYrZZkernelrrr�_get_win_folder_with_jnas&rh)r`�__main__ZMyAppZ	MyCompany)rr.r1r2r4r*r/z-- app dirs %s --z%-- app dirs (with optional 'version')z1.0r<z%s: %sz)
-- app dirs (without optional 'version')z+
-- app dirs (without optional 'appauthor')z(
-- app dirs (with disabled 'appauthor'))r)NNNF)NNNF)NNNF)NNNF)NNNT)NNNF)NNNT)-r@Z__version_info__r�map�str�__version__�sysr
�version_inforCrS�platform�
startswithZjava_verZos_namerrr*r.r/r1r2r4�objectr5rLr[rcrhrPZwin32comrrWr_r`Zcom.sun.jnaZcomr=rrZprops�print�dirsZproprRrrrr�<module>s~



7
B
(
3
9
*
30


PK�V[#R���1_vendor/__pycache__/__init__.cpython-38.opt-1.pycnu�[���U

�Qab�@sdS)N�rrr�B/usr/lib/python3.8/site-packages/pkg_resources/_vendor/__init__.py�<module>�PK�V[�h�PP0_vendor/__pycache__/appdirs.cpython-38.opt-1.pycnu�[���U

�Qabg`�@s�dZdZd�eee��ZddlZddlZejddkZ	e	r>eZ
ej�d�r�ddlZe�
�ddZe�d�rrdZq�e�d	�r�d
Zq�dZnejZd4d
d�Zd5dd�Zd6dd�Zd7dd�Zd8dd�Zd9dd�Zd:dd�ZGdd�de�Zdd�Zd d!�Zd"d#�Zd$d%�Zedk�r�zddlZeZWnne k
�r�zdd&l!m"Z"eZWnBe k
�r�zddl#Z$eZWne k
�r�eZYnXYnXYnXe%d'k�r�d(Z&d)Z'd*Z(e)d+e�e)d,�ee&e'd-d.�Z*e(D]Z+e)d/e+e,e*e+�f��q�e)d0�ee&e'�Z*e(D]Z+e)d/e+e,e*e+�f��q�e)d1�ee&�Z*e(D]Z+e)d/e+e,e*e+�f��q0e)d2�ee&dd3�Z*e(D]Z+e)d/e+e,e*e+�f��qfdS);zyUtilities for determining application-specific dirs.

See <http://github.com/ActiveState/appdirs> for details and usage.
)����.�Nr�javaZWindows�win32ZMac�darwinZlinux2FcCs�tdkr^|dkr|}|rdpd}tj�t|��}|r�|dk	rNtj�|||�}q�tj�||�}nNtdkr�tj�d�}|r�tj�||�}n&t�dtj�d	��}|r�tj�||�}|r�|r�tj�||�}|S)
aJReturn full path to the user-specific data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user data directories are:
        Mac OS X:               ~/Library/Application Support/<AppName>
        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>

    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
    That means, by default "~/.local/share/<AppName>".
    rN�
CSIDL_APPDATA�CSIDL_LOCAL_APPDATAFrz~/Library/Application Support/Z
XDG_DATA_HOMEz~/.local/share��system�os�path�normpath�_get_win_folder�join�
expanduser�getenv)�appname�	appauthor�version�roaming�constr�r�A/usr/lib/python3.8/site-packages/pkg_resources/_vendor/appdirs.py�
user_data_dir,s& rcstdkrR|dkr�}tj�td��}�r�|dk	rBtj�||��}q�tj�|��}n�tdkrztj�d�}�r�tj�|��}ntt�dtj�dd	g��}d
d�|�	tj�D�}�r�|r�tj��|���fdd�|D�}|r�tj�|�}n|d
}|S��r|�rtj�||�}|S)aiReturn full path to the user-shared data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "multipath" is an optional parameter only applicable to *nix
            which indicates that the entire list of data dirs should be
            returned. By default, the first item from XDG_DATA_DIRS is
            returned, or '/usr/local/share/<AppName>',
            if XDG_DATA_DIRS is not set

    Typical site data directories are:
        Mac OS X:   /Library/Application Support/<AppName>
        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.

    For Unix, this is using the $XDG_DATA_DIRS[0] default.

    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
    rN�CSIDL_COMMON_APPDATAFrz/Library/Application SupportZ
XDG_DATA_DIRSz/usr/local/sharez
/usr/sharecSs g|]}tj�|�tj���qSr�r
rr�rstrip�sep��.0�xrrr�
<listcomp>�sz!site_data_dir.<locals>.<listcomp>csg|]}tj�|�g��qSr�r
rrr �rrrr#�sr)
rr
rrrrrr�pathsep�split�rrr�	multipathrZpathlistrr%r�
site_data_dircs6�r*cCsXtdkrt||d|�}n&t�dtj�d��}|r>tj�||�}|rT|rTtj�||�}|S)a�Return full path to the user-specific config dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user config directories are:
        Mac OS X:               same as user_data_dir
        Unix:                   ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
        Win *:                  same as user_data_dir

    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
    �rrNZXDG_CONFIG_HOMEz	~/.config�rrr
rrrr�rrrrrrrr�user_config_dir�sr.cs�tdkr*t�|�}�r�|r�tj�||�}ndt�dd�}dd�|�tj�D�}�rt|rbtj��|���fdd�|D�}|r�tj�|�}n|d}|S)aReturn full path to the user-shared data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "multipath" is an optional parameter only applicable to *nix
            which indicates that the entire list of config dirs should be
            returned. By default, the first item from XDG_CONFIG_DIRS is
            returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set

    Typical site config directories are:
        Mac OS X:   same as site_data_dir
        Unix:       /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
                    $XDG_CONFIG_DIRS
        Win *:      same as site_data_dir
        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)

    For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False

    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
    r+ZXDG_CONFIG_DIRSz/etc/xdgcSs g|]}tj�|�tj���qSrrr rrrr#�sz#site_config_dir.<locals>.<listcomp>csg|]}tj�|�g��qSrr$r r%rrr#�sr)rr*r
rrrr'r&r(rr%r�site_config_dir�s
r/TcCs�tdkrd|dkr|}tj�td��}|r�|dk	rBtj�|||�}ntj�||�}|r�tj�|d�}nNtdkr�tj�d�}|r�tj�||�}n&t�dtj�d	��}|r�tj�||�}|r�|r�tj�||�}|S)
aReturn full path to the user-specific cache dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "opinion" (boolean) can be False to disable the appending of
            "Cache" to the base app data dir for Windows. See
            discussion below.

    Typical user cache directories are:
        Mac OS X:   ~/Library/Caches/<AppName>
        Unix:       ~/.cache/<AppName> (XDG default)
        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache

    On Windows the only suggestion in the MSDN docs is that local settings go in
    the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
    app data dir (the default returned by `user_data_dir` above). Apps typically
    put cache data somewhere *under* the given dir here. Some examples:
        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
        ...\Acme\SuperApp\Cache\1.0
    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
    This can be disabled with the `opinion=False` option.
    rNr
FZCacherz~/Library/CachesZXDG_CACHE_HOMEz~/.cacher�rrrZopinionrrrr�user_cache_dirs(!r1cCsXtdkrt||d|�}n&t�dtj�d��}|r>tj�||�}|rT|rTtj�||�}|S)aReturn full path to the user-specific state dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user state directories are:
        Mac OS X:  same as user_data_dir
        Unix:      ~/.local/state/<AppName>   # or in $XDG_STATE_HOME, if defined
        Win *:     same as user_data_dir

    For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
    to extend the XDG spec and support $XDG_STATE_HOME.

    That means, by default "~/.local/state/<AppName>".
    r+NZXDG_STATE_HOMEz~/.local/stater,r-rrr�user_state_dir9sr2cCs�tdkr tj�tj�d�|�}nNtdkrLt|||�}d}|rntj�|d�}n"t|||�}d}|rntj�|d�}|r�|r�tj�||�}|S)a�Return full path to the user-specific log dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "opinion" (boolean) can be False to disable the appending of
            "Logs" to the base app data dir for Windows, and "log" to the
            base cache dir for Unix. See discussion below.

    Typical user log directories are:
        Mac OS X:   ~/Library/Logs/<AppName>
        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs

    On Windows the only suggestion in the MSDN docs is that local settings
    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
    examples of what some windows apps use for a logs dir.)

    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
    value for Windows and appends "log" to the user cache dir for Unix.
    This can be disabled with the `opinion=False` option.
    rz~/Library/LogsrFZLogs�log)rr
rrrrr1r0rrr�user_log_dircs" 
�r4c@sneZdZdZddd�Zedd��Zedd	��Zed
d��Zedd
��Z	edd��Z
edd��Zedd��ZdS)�AppDirsz1Convenience wrapper for getting application dirs.NFcCs"||_||_||_||_||_dS)N)rrrrr))�selfrrrrr)rrr�__init__�s
zAppDirs.__init__cCst|j|j|j|jd�S�N)rr)rrrrr�r6rrrr�s
�zAppDirs.user_data_dircCst|j|j|j|jd�S�N)rr))r*rrrr)r9rrrr*�s
�zAppDirs.site_data_dircCst|j|j|j|jd�Sr8)r.rrrrr9rrrr.�s
�zAppDirs.user_config_dircCst|j|j|j|jd�Sr:)r/rrrr)r9rrrr/�s
�zAppDirs.site_config_dircCst|j|j|jd�S�N�r)r1rrrr9rrrr1�s
�zAppDirs.user_cache_dircCst|j|j|jd�Sr;)r2rrrr9rrrr2�s
�zAppDirs.user_state_dircCst|j|j|jd�Sr;)r4rrrr9rrrr4�s
�zAppDirs.user_log_dir)NNNFF)
�__name__�
__module__�__qualname__�__doc__r7�propertyrr*r.r/r1r2r4rrrrr5�s&�






r5cCsHtrddl}nddl}dddd�|}|�|jd�}|�||�\}}|S)z�This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    rNZAppDatazCommon AppDataz
Local AppData�r	rr
z@Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders)�PY3�winreg�_winreg�OpenKey�HKEY_CURRENT_USERZQueryValueEx)�
csidl_namerEZshell_folder_name�key�dir�typerrr�_get_win_folder_from_registry�s
���rLcCs�ddlm}m}|�dt||�dd�}z^t|�}d}|D]}t|�dkr8d}qRq8|r�zddl}|�|�}Wnt	k
r�YnXWnt
k
r�YnX|S)Nr)�shellcon�shellF�T)�win32com.shellrMrN�SHGetFolderPath�getattr�unicode�ord�win32api�GetShortPathName�ImportError�UnicodeError)rHrMrNrJ�
has_high_char�crUrrr�_get_win_folder_with_pywin32�s$
r[cCs�ddl}dddd�|}|�d�}|jj�d|dd|�d}|D]}t|�dkr@d	}qZq@|r�|�d�}|jj�|j|d�r�|}|jS)
Nr��#�rBiFrOT)	�ctypesZcreate_unicode_buffer�windllZshell32ZSHGetFolderPathWrTZkernel32ZGetShortPathNameW�value)rHr_Zcsidl_const�bufrYrZZbuf2rrr�_get_win_folder_with_ctypes�s&��

rccCs�ddl}ddlm}ddlm}|jjd}|�d|�}|jj	}|�
dt|j|�d|jj
|�|j�|����d�}d}|D]}	t|	�dkr|d	}q�q||r�|�d|�}|jj	}
|
�|||�r�|j�|����d�}|S)
Nr)�jna)r�rZ�FrOT)�arrayZcom.sunrdZcom.sun.jna.platformrZWinDefZMAX_PATHZzerosZShell32ZINSTANCErQrRZShlObjZSHGFP_TYPE_CURRENTZNativeZtoStringZtostringrrTZKernel32rV)rHrgrdrZbuf_sizerbrNrJrYrZZkernelrrr�_get_win_folder_with_jnas&rh)r`�__main__ZMyAppZ	MyCompany)rr.r1r2r4r*r/z-- app dirs %s --z%-- app dirs (with optional 'version')z1.0r<z%s: %sz)
-- app dirs (without optional 'version')z+
-- app dirs (without optional 'appauthor')z(
-- app dirs (with disabled 'appauthor'))r)NNNF)NNNF)NNNF)NNNF)NNNT)NNNF)NNNT)-r@Z__version_info__r�map�str�__version__�sysr
�version_inforCrS�platform�
startswithZjava_verZos_namerrr*r.r/r1r2r4�objectr5rLr[rcrhrPZwin32comrrWr_r`Zcom.sun.jnaZcomr=rrZprops�print�dirsZproprRrrrr�<module>s~



7
B
(
3
9
*
30


PK�V[:��-n_n_&_vendor/__pycache__/six.cpython-38.pycnu�[���U

�Qab�u�A@sRdZddlmZddlZddlZddlZddlZddlZdZdZ	ej
ddkZej
ddkZej
dd�dkZ
er�efZefZefZeZeZejZn~efZeefZeejfZeZeZej�d	�r�ed
�ZnHGdd�de�Z ze!e ��Wne"k
�red
�ZYn
Xed
�Z[ dd�Z#dd�Z$Gdd�de�Z%Gdd�de%�Z&Gdd�dej'�Z(Gdd�de%�Z)Gdd�de�Z*e*e+�Z,Gdd�de(�Z-e)dddd �e)d!d"d#d$d!�e)d%d"d"d&d%�e)d'd(d#d)d'�e)d*d(d+�e)d,d"d#d-d,�e)d.d/d/d0d.�e)d1d/d/d.d1�e)d2d(d#d3d2�e)d4d(e
�rd5nd6d7�e)d8d(d9�e)d:d;d<d=�e)d d d�e)d>d>d?�e)d@d@d?�e)dAdAd?�e)d3d(d#d3d2�e)dBd"d#dCdB�e)dDd"d"dEdD�e&d#d(�e&dFdG�e&dHdI�e&dJdKdL�e&dMdNdM�e&dOdPdQ�e&dRdSdT�e&dUdVdW�e&dXdYdZ�e&d[d\d]�e&d^d_d`�e&dadbdc�e&dddedf�e&dgdhdi�e&djdjdk�e&dldldk�e&dmdmdk�e&dndndo�e&dpdq�e&drds�e&dtdu�e&dvdwdv�e&dxdy�e&dzd{d|�e&d}d~d�e&d�d�d��e&d�d�d��e&d�d�d��e&d�d�d��e&d�d�d��e&d�d�d��e&d�d�d��e&d�d�d��e&d�d�d�e&d�d�d��e&d�d�d��e&d�d�d��e&d�e+d�d��e&d�e+d�d��e&d�e+d�e+d��e&d�d�d��e&d�d�d��e&d�d�d��g>Z.ejd�k�rRe.e&d�d��g7Z.e.D]2Z/e0e-e/j1e/�e2e/e&��rVe,�3e/d�e/j1��qV[/e.e-_.e-e+d��Z4e,�3e4d��Gd�d��d�e(�Z5e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d=d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��gZ6e6D]Z/e0e5e/j1e/��q�[/e6e5_.e,�3e5e+d��d�dҡGd�dԄd�e(�Z7e)d�d�d��e)d�d�d��e)d�d�d��gZ8e8D]Z/e0e7e/j1e/��q[/e8e7_.e,�3e7e+d��d�dۡGd�d݄d�e(�Z9e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃g!Z:e:D]Z/e0e9e/j1e/��q�[/e:e9_.e,�3e9e+�d��d�d�G�d�d��de(�Z;e)�dd��d�e)�dd��d�e)�dd��d�e)�d	d��d�gZ<e<D]Z/e0e;e/j1e/��q8[/e<e;_.e,�3e;e+�d
��d�d�G�d
�d��de(�Z=e)�dd�d��gZ>e>D]Z/e0e=e/j1e/��q�[/e>e=_.e,�3e=e+�d��d�d�G�d�d��dej'�Z?e,�3e?e+d���d��d�d�Z@�d�d�ZAe�	rH�dZB�dZC�dZD�dZE�dZF�dZGn$�d ZB�d!ZC�d"ZD�d#ZE�d$ZF�d%ZGzeHZIWn"eJk
�	r��d&�d'�ZIYnXeIZHzeKZKWn"eJk
�	r��d(�d)�ZKYnXe�	r�d*�d+�ZLejMZN�d,�d-�ZOeZPn>�d.�d+�ZL�d/�d0�ZN�d1�d-�ZOG�d2�d3��d3e�ZPeKZKe#eL�d4�e�QeB�ZRe�QeC�ZSe�QeD�ZTe�QeE�ZUe�QeF�ZVe�QeG�ZWe�
rԐd5�d6�ZX�d7�d8�ZY�d9�d:�ZZ�d;�d<�Z[e�\�d=�Z]e�\�d>�Z^e�\�d?�Z_nT�d@�d6�ZX�dA�d8�ZY�dB�d:�ZZ�dC�d<�Z[e�\�dD�Z]e�\�dE�Z^e�\�dF�Z_e#eX�dG�e#eY�dH�e#eZ�dI�e#e[�dJ�e�r�dK�dL�Z`�dM�dN�ZaebZcddldZded�e�dO�jfZg[de�hd�ZiejjZkelZmddlnZnenjoZoenjpZp�dPZqej
�dQ�dQk�r�dRZr�dSZsn�dTZr�dUZsnj�dV�dL�Z`�dW�dN�ZaecZcebZg�dX�dY�Zi�dZ�d[�Zke�tejuev�ZmddloZoeojoZoZp�d\Zq�dRZr�dSZse#e`�d]�e#ea�d^��d_�dP�Zw�d`�dT�Zx�da�dU�Zye�r�eze4j{�db�Z|�d|�dc�dd�Z}n�d}�de�df�Z|e|�dg�ej
dd��dhk�r�e|�di�n.ej
dd��dhk�
re|�dj�n�dk�dl�Z~eze4j{�dmd�Zedk�
rL�dn�do�Zej
dd��dpk�
rreZ��dq�do�Ze#e}�dr�ej
dd�dk�
r�ej�ej�f�ds�dt�Z�nej�Z��du�dv�Z��dw�dx�Z��dy�dz�Z�gZ�e+Z�e�����d{�dk	�
r�ge�_�ej��rBe�ej��D]4\Z�Z�ee��j+dk�re�j1e+k�rej�e�=�q>�q[�[�ej���e,�dS(~z6Utilities for writing code that runs on Python 2 and 3�)�absolute_importNz'Benjamin Peterson <benjamin@python.org>z1.10.0��)r��javai���c@seZdZdd�ZdS)�XcCsdS)Nl���selfrr�=/usr/lib/python3.8/site-packages/pkg_resources/_vendor/six.py�__len__>sz	X.__len__N)�__name__�
__module__�__qualname__rrrrrr<srl����cCs
||_dS)z Add documentation to a function.N)�__doc__)�func�docrrr�_add_docKsrcCst|�tj|S)z7Import module, returning the module after the last dot.)�
__import__�sys�modules��namerrr�_import_modulePsrc@seZdZdd�Zdd�ZdS)�
_LazyDescrcCs
||_dS�Nr�r
rrrr�__init__Xsz_LazyDescr.__init__cCsB|��}t||j|�zt|j|j�Wntk
r<YnX|Sr)�_resolve�setattrr�delattr�	__class__�AttributeError)r
�obj�tp�resultrrr�__get__[sz_LazyDescr.__get__N)r
rrrr&rrrrrVsrcs.eZdZd�fdd�	Zdd�Zdd�Z�ZS)	�MovedModuleNcs2tt|��|�tr(|dkr |}||_n||_dSr)�superr'r�PY3�mod)r
r�old�new�r!rrriszMovedModule.__init__cCs
t|j�Sr)rr*r	rrrrrszMovedModule._resolvecCs"|��}t||�}t|||�|Sr)r�getattrr)r
�attr�_module�valuerrr�__getattr__us
zMovedModule.__getattr__)N)r
rrrrr2�
__classcell__rrr-rr'gs	r'cs(eZdZ�fdd�Zdd�ZgZ�ZS)�_LazyModulecstt|��|�|jj|_dSr)r(r4rr!rrr-rrr~sz_LazyModule.__init__cCs ddg}|dd�|jD�7}|S)Nrr
cSsg|]
}|j�qSrr)�.0r/rrr�
<listcomp>�sz'_LazyModule.__dir__.<locals>.<listcomp>)�_moved_attributes)r
Zattrsrrr�__dir__�sz_LazyModule.__dir__)r
rrrr8r7r3rrr-rr4|sr4cs&eZdZd�fdd�	Zdd�Z�ZS)�MovedAttributeNcsdtt|��|�trH|dkr |}||_|dkr@|dkr<|}n|}||_n||_|dkrZ|}||_dSr)r(r9rr)r*r/)r
rZold_modZnew_modZold_attrZnew_attrr-rrr�szMovedAttribute.__init__cCst|j�}t||j�Sr)rr*r.r/)r
�modulerrrr�s
zMovedAttribute._resolve)NN)r
rrrrr3rrr-rr9�sr9c@sVeZdZdZdd�Zdd�Zdd�Zdd	d
�Zdd�Zd
d�Z	dd�Z
dd�ZeZdS)�_SixMetaPathImporterz�
    A meta path importer to import six.moves and its submodules.

    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    cCs||_i|_dSr)r�
known_modules)r
Zsix_module_namerrrr�sz_SixMetaPathImporter.__init__cGs"|D]}||j|jd|<qdS�N�.�r<r)r
r*Z	fullnames�fullnamerrr�_add_module�sz _SixMetaPathImporter._add_modulecCs|j|jd|Sr=r?�r
r@rrr�_get_module�sz _SixMetaPathImporter._get_moduleNcCs||jkr|SdSr)r<)r
r@�pathrrr�find_module�s
z _SixMetaPathImporter.find_modulecCs2z|j|WStk
r,td|��YnXdS)Nz!This loader does not know module )r<�KeyError�ImportErrorrBrrrZ__get_module�sz!_SixMetaPathImporter.__get_modulecCsTztj|WStk
r YnX|�|�}t|t�r@|��}n||_|tj|<|Sr)rrrF� _SixMetaPathImporter__get_module�
isinstancer'r�
__loader__)r
r@r*rrr�load_module�s



z _SixMetaPathImporter.load_modulecCst|�|�d�S)z�
        Return true, if the named module is a package.

        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        �__path__)�hasattrrHrBrrr�
is_package�sz_SixMetaPathImporter.is_packagecCs|�|�dS)z;Return None

        Required, if is_package is implementedN)rHrBrrr�get_code�s
z_SixMetaPathImporter.get_code)N)
r
rrrrrArCrErHrKrNrO�
get_sourcerrrrr;�s
	r;c@seZdZdZgZdS)�_MovedItemszLazy loading of moved objectsN)r
rrrrLrrrrrQ�srQZ	cStringIO�io�StringIO�filter�	itertools�builtinsZifilter�filterfalseZifilterfalse�inputZ__builtin__Z	raw_input�internr�map�imap�getcwd�osZgetcwdu�getcwdb�rangeZxrangeZ
reload_module�	importlibZimp�reload�reduce�	functoolsZshlex_quoteZpipesZshlexZquote�UserDict�collections�UserList�
UserString�zipZizip�zip_longestZizip_longestZconfigparserZConfigParser�copyregZcopy_regZdbm_gnuZgdbmzdbm.gnuZ
_dummy_threadZdummy_threadZhttp_cookiejarZ	cookielibzhttp.cookiejarZhttp_cookiesZCookiezhttp.cookiesZ
html_entitiesZhtmlentitydefsz
html.entitiesZhtml_parserZ
HTMLParserzhtml.parserZhttp_clientZhttplibzhttp.clientZemail_mime_multipartzemail.MIMEMultipartzemail.mime.multipartZemail_mime_nonmultipartzemail.MIMENonMultipartzemail.mime.nonmultipartZemail_mime_textzemail.MIMETextzemail.mime.textZemail_mime_basezemail.MIMEBasezemail.mime.baseZBaseHTTPServerzhttp.serverZ
CGIHTTPServerZSimpleHTTPServerZcPickle�pickleZqueueZQueue�reprlib�reprZsocketserverZSocketServer�_thread�threadZtkinterZTkinterZtkinter_dialogZDialogztkinter.dialogZtkinter_filedialogZ
FileDialogztkinter.filedialogZtkinter_scrolledtextZScrolledTextztkinter.scrolledtextZtkinter_simpledialogZSimpleDialogztkinter.simpledialogZtkinter_tixZTixztkinter.tixZtkinter_ttkZttkztkinter.ttkZtkinter_constantsZTkconstantsztkinter.constantsZtkinter_dndZTkdndztkinter.dndZtkinter_colorchooserZtkColorChooserztkinter.colorchooserZtkinter_commondialogZtkCommonDialogztkinter.commondialogZtkinter_tkfiledialogZtkFileDialogZtkinter_fontZtkFontztkinter.fontZtkinter_messageboxZtkMessageBoxztkinter.messageboxZtkinter_tksimpledialogZtkSimpleDialogZurllib_parsez.moves.urllib_parsezurllib.parseZurllib_errorz.moves.urllib_errorzurllib.errorZurllibz
.moves.urllibZurllib_robotparser�robotparserzurllib.robotparserZ
xmlrpc_clientZ	xmlrpclibz
xmlrpc.clientZ
xmlrpc_serverZSimpleXMLRPCServerz
xmlrpc.serverZwin32�winreg�_winregzmoves.z.moves�movesc@seZdZdZdS)�Module_six_moves_urllib_parsez7Lazy loading of moved objects in six.moves.urllib_parseN�r
rrrrrrrrt@srtZParseResultZurlparseZSplitResultZparse_qsZ	parse_qslZ	urldefragZurljoinZurlsplitZ
urlunparseZ
urlunsplitZ
quote_plusZunquoteZunquote_plusZ	urlencodeZ
splitqueryZsplittagZ	splituserZ
uses_fragmentZuses_netlocZuses_paramsZ
uses_queryZ
uses_relative�moves.urllib_parsezmoves.urllib.parsec@seZdZdZdS)�Module_six_moves_urllib_errorz7Lazy loading of moved objects in six.moves.urllib_errorNrurrrrrwhsrwZURLErrorZurllib2Z	HTTPErrorZContentTooShortErrorz.moves.urllib.error�moves.urllib_errorzmoves.urllib.errorc@seZdZdZdS)�Module_six_moves_urllib_requestz9Lazy loading of moved objects in six.moves.urllib_requestNrurrrrry|sryZurlopenzurllib.requestZinstall_openerZbuild_openerZpathname2urlZurl2pathnameZ
getproxiesZRequestZOpenerDirectorZHTTPDefaultErrorHandlerZHTTPRedirectHandlerZHTTPCookieProcessorZProxyHandlerZBaseHandlerZHTTPPasswordMgrZHTTPPasswordMgrWithDefaultRealmZAbstractBasicAuthHandlerZHTTPBasicAuthHandlerZProxyBasicAuthHandlerZAbstractDigestAuthHandlerZHTTPDigestAuthHandlerZProxyDigestAuthHandlerZHTTPHandlerZHTTPSHandlerZFileHandlerZ
FTPHandlerZCacheFTPHandlerZUnknownHandlerZHTTPErrorProcessorZurlretrieveZ
urlcleanupZ	URLopenerZFancyURLopenerZproxy_bypassz.moves.urllib.request�moves.urllib_requestzmoves.urllib.requestc@seZdZdZdS)� Module_six_moves_urllib_responsez:Lazy loading of moved objects in six.moves.urllib_responseNrurrrrr{�sr{Zaddbasezurllib.responseZaddclosehookZaddinfoZ
addinfourlz.moves.urllib.response�moves.urllib_responsezmoves.urllib.responsec@seZdZdZdS)�#Module_six_moves_urllib_robotparserz=Lazy loading of moved objects in six.moves.urllib_robotparserNrurrrrr}�sr}ZRobotFileParserz.moves.urllib.robotparser�moves.urllib_robotparserzmoves.urllib.robotparserc@sNeZdZdZgZe�d�Ze�d�Ze�d�Z	e�d�Z
e�d�Zdd�Zd	S)
�Module_six_moves_urllibzICreate a six.moves.urllib namespace that resembles the Python 3 namespacervrxrzr|r~cCsdddddgS)N�parse�error�request�responserprr	rrrr8�szModule_six_moves_urllib.__dir__N)
r
rrrrL�	_importerrCr�r�r�r�rpr8rrrrr�s




rzmoves.urllibcCstt|j|�dS)zAdd an item to six.moves.N)rrQr)Zmoverrr�add_move�sr�cCsXztt|�WnDtk
rRztj|=Wn"tk
rLtd|f��YnXYnXdS)zRemove item from six.moves.zno such move, %rN)r rQr"rs�__dict__rFrrrr�remove_move�sr��__func__�__self__�__closure__�__code__�__defaults__�__globals__�im_funcZim_selfZfunc_closureZ	func_codeZ
func_defaultsZfunc_globalscCs|��Sr)�next)�itrrr�advance_iteratorsr�cCstdd�t|�jD��S)Ncss|]}d|jkVqdS)�__call__N)r�)r5�klassrrr�	<genexpr>szcallable.<locals>.<genexpr>)�any�type�__mro__)r#rrr�callablesr�cCs|Srr�Zunboundrrr�get_unbound_functionsr�cCs|Srr�r�clsrrr�create_unbound_methodsr�cCs|jSr)r�r�rrrr�"scCst�|||j�Sr)�types�
MethodTyper!)rr#rrr�create_bound_method%sr�cCst�|d|�Sr)r�r�r�rrrr�(sc@seZdZdd�ZdS)�IteratorcCst|��|�Sr)r��__next__r	rrrr�-sz
Iterator.nextN)r
rrr�rrrrr�+sr�z3Get the function out of a possibly unbound functioncKst|jf|��Sr)�iter�keys��d�kwrrr�iterkeys>sr�cKst|jf|��Sr)r��valuesr�rrr�
itervaluesAsr�cKst|jf|��Sr)r��itemsr�rrr�	iteritemsDsr�cKst|jf|��Sr)r�Zlistsr�rrr�	iterlistsGsr�r�r�r�cKs|jf|�Sr)r�r�rrrr�PscKs|jf|�Sr)r�r�rrrr�SscKs|jf|�Sr)r�r�rrrr�VscKs|jf|�Sr)r�r�rrrr�Ys�viewkeys�
viewvalues�	viewitemsz1Return an iterator over the keys of a dictionary.z3Return an iterator over the values of a dictionary.z?Return an iterator over the (key, value) pairs of a dictionary.zBReturn an iterator over the (key, [values]) pairs of a dictionary.cCs
|�d�S)Nzlatin-1)�encode��srrr�bksr�cCs|Srrr�rrr�unsr�z>B�assertCountEqual�ZassertRaisesRegexpZassertRegexpMatches�assertRaisesRegex�assertRegexcCs|Srrr�rrrr��scCst|�dd�d�S)Nz\\z\\\\Zunicode_escape)�unicode�replacer�rrrr��scCst|d�S)Nr��ord)Zbsrrr�byte2int�sr�cCst||�Srr�)Zbuf�irrr�
indexbytes�sr�ZassertItemsEqualzByte literalzText literalcOst|t�||�Sr)r.�_assertCountEqual�r
�args�kwargsrrrr��scOst|t�||�Sr)r.�_assertRaisesRegexr�rrrr��scOst|t�||�Sr)r.�_assertRegexr�rrrr��s�execcCs*|dkr|�}|j|k	r"|�|��|�dSr)�
__traceback__�with_traceback)r$r1�tbrrr�reraise�s


r�cCsB|dkr*t�d�}|j}|dkr&|j}~n|dkr6|}td�dS)zExecute code in a namespace.Nr�zexec _code_ in _globs_, _locs_)r�	_getframe�	f_globals�f_localsr�)Z_code_Z_globs_Z_locs_�framerrr�exec_�s
r�z9def reraise(tp, value, tb=None):
    raise tp, value, tb
)rrzrdef raise_from(value, from_value):
    if from_value is None:
        raise value
    raise value from from_value
zCdef raise_from(value, from_value):
    raise value from from_value
cCs|�dSrr)r1Z
from_valuerrr�
raise_from�sr��printc
s.|�dtj���dkrdS�fdd�}d}|�dd�}|dk	r`t|t�rNd}nt|t�s`td��|�d	d�}|dk	r�t|t�r�d}nt|t�s�td
��|r�td��|s�|D]}t|t�r�d}q�q�|r�td�}td
�}nd}d
}|dkr�|}|dkr�|}t|�D] \}	}|	�r||�||��q||�dS)z4The new-style print function for Python 2.4 and 2.5.�fileNcsdt|t�st|�}t�t�rVt|t�rV�jdk	rVt�dd�}|dkrHd}|��j|�}��|�dS)N�errors�strict)	rI�
basestring�strr�r��encodingr.r��write)�datar���fprrr��s

��zprint_.<locals>.writeF�sepTzsep must be None or a string�endzend must be None or a stringz$invalid keyword arguments to print()�
� )�popr�stdoutrIr�r��	TypeError�	enumerate)
r�r�r�Zwant_unicoder�r��arg�newlineZspacer�rr�r�print_�sL





r�)rrcOs<|�dtj�}|�dd�}t||�|r8|dk	r8|��dS)Nr��flushF)�getrr�r��_printr�)r�r�r�r�rrrr�s

zReraise an exception.cs���fdd�}|S)Ncst�����|�}�|_|Sr)rc�wraps�__wrapped__)�f��assigned�updated�wrappedrr�wrapperszwraps.<locals>.wrapperr)r�r�r�r�rr�rr�sr�cs&G��fdd�d��}t�|ddi�S)z%Create a base class with a metaclass.cseZdZ��fdd�ZdS)z!with_metaclass.<locals>.metaclasscs�|�|�Srr)r�rZ
this_basesr���bases�metarr�__new__'sz)with_metaclass.<locals>.metaclass.__new__N)r
rrr�rr�rr�	metaclass%sr�Ztemporary_classr)r�r�)r�r�r�rr�r�with_metaclass sr�cs�fdd�}|S)z6Class decorator for creating a class with a metaclass.csh|j��}|�d�}|dk	r@t|t�r,|g}|D]}|�|�q0|�dd�|�dd��|j|j|�S)N�	__slots__r��__weakref__)r��copyr�rIr�r�r
�	__bases__)r�Z	orig_vars�slotsZ	slots_var�r�rrr�.s


zadd_metaclass.<locals>.wrapperr)r�r�rr�r�
add_metaclass,sr�cCs2tr.d|jkrtd|j��|j|_dd�|_|S)a
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
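    A short usage sketch (assuming the standalone six package; the class is illustrative):

        import six

        @six.python_2_unicode_compatible
        class Greeting(object):
            def __init__(self, name):
                self.name = name

            def __str__(self):
                # return text; on Python 2 the decorator renames this to __unicode__
                # and adds a __str__ that encodes the result as UTF-8
                return u"Hello, %s!" % self.name

        print(Greeting("world"))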
    �__str__zY@python_2_unicode_compatible cannot be applied to %s because it doesn't define __str__().cSs|���d�S)Nzutf-8)�__unicode__r�r	rrr�<lambda>J�z-python_2_unicode_compatible.<locals>.<lambda>)�PY2r��
ValueErrorr
rr)r�rrr�python_2_unicode_compatible<s
�
r�__spec__)N)NN)�rZ
__future__rrcrU�operatorrr��
__author__�__version__�version_inforr)ZPY34r�Zstring_types�intZ
integer_typesr�Zclass_typesZ	text_type�bytesZbinary_type�maxsizeZMAXSIZEr�ZlongZ	ClassTyper��platform�
startswith�objectr�len�
OverflowErrorrrrr'�
ModuleTyper4r9r;r
r�rQr7r/rrrIrArsrtZ_urllib_parse_moved_attributesrwZ_urllib_error_moved_attributesryZ _urllib_request_moved_attributesr{Z!_urllib_response_moved_attributesr}Z$_urllib_robotparser_moved_attributesrr�r�Z
_meth_funcZ
_meth_selfZ
_func_closureZ
_func_codeZ_func_defaultsZ
_func_globalsr�r��	NameErrorr�r�r�r�r�r��
attrgetterZget_method_functionZget_method_selfZget_function_closureZget_function_codeZget_function_defaultsZget_function_globalsr�r�r�r��methodcallerr�r�r�r�r��chrZunichr�struct�Struct�packZint2byte�
itemgetterr��getitemr�r�Z	iterbytesrRrS�BytesIOr�r�r��partialr[r�r�r�r�r.rVr�r�r�r�r��WRAPPER_ASSIGNMENTS�WRAPPER_UPDATESr�r�r�rrL�__package__�globalsr�r�submodule_search_locations�	meta_pathr�r�Zimporter�appendrrrr�<module>s�

>





























��


�


�D�






















��


��
































�#�����
��





��



5��
PK�V[�@sr��,_vendor/__pycache__/pyparsing.cpython-38.pycnu�[���U

�Qabw��i@s�dZdZdZdZddlZddlmZddlZddl	Z	ddl
Z
ddlZddlZddl
Z
ddlZddlZddlZddlmZzddlmZWn ek
r�ddlmZYnXzdd	lmZdd
lmZWn,ek
r�dd	l
mZdd
l
mZYnXzddl
mZWnBek
�rFzddlmZWnek
�r@dZYnXYnXdd
ddddddddddddddddddd d!d"d#d$d%d&d'd(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDdEdFdGdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcdddedfdgdhdidjdkdldmdndodpdqdrdsdtgiZee	j�ddu�ZeddukZ e �rpe	j!Z"e#Z$e%Z&e#Z'e(e)e*e+e,ee-e.e/e0e1gZ2n`e	j3Z"e4Z5dvdw�Z'gZ2ddl6Z6dx�7�D]8Z8ze2�9e:e6e8��Wne;k
�r�Y�q�YnX�q�e<dydz�e5d{�D��Z=d|d}�Z>Gd~d�de?�Z@ejAejBZCd�ZDeDd�ZEeCeDZFe%d��ZGd��Hd�dz�ejID��ZJGd�d#�d#eK�ZLGd�d%�d%eL�ZMGd�d'�d'eL�ZNGd�d)�d)eN�ZOGd�d,�d,eK�ZPGd�d��d�e?�ZQGd�d(�d(e?�ZRe�SeR�d�d?�ZTd�dP�ZUd�dM�ZVd�d��ZWd�d��ZXd�d��ZYd�dW�ZZ�d/d�d��Z[Gd�d*�d*e?�Z\Gd�d2�d2e\�Z]Gd�d�de]�Z^Gd�d�de]�Z_Gd�d�de]�Z`e`Zae`e\_bGd�d�de]�ZcGd�d�de`�ZdGd�d
�d
ec�ZeGd�dr�dre]�ZfGd�d5�d5e]�ZgGd�d-�d-e]�ZhGd�d+�d+e]�ZiGd�d�de]�ZjGd�d4�d4e]�ZkGd�d��d�e]�ZlGd�d�del�ZmGd�d�del�ZnGd�d�del�ZoGd�d0�d0el�ZpGd�d/�d/el�ZqGd�d7�d7el�ZrGd�d6�d6el�ZsGd�d&�d&e\�ZtGd�d�det�ZuGd�d"�d"et�ZvGd�d�det�ZwGd�d�det�ZxGd�d$�d$e\�ZyGd�d�dey�ZzGd�d�dey�Z{Gd�d��d�ey�Z|Gd�d�de|�Z}Gd�d8�d8e|�Z~Gd�d��d�e?�Ze�Z�Gd�d!�d!ey�Z�Gd�d.�d.ey�Z�Gd�d�dey�Z�Gd�dÄd�e��Z�Gd�d3�d3ey�Z�Gd�d�de��Z�Gd�d�de��Z�Gd�d�de��Z�Gd�d1�d1e��Z�Gd�d �d e?�Z�d�dh�Z��d0d�dF�Z��d1d�dB�Z�d�dЄZ�d�dU�Z�d�dT�Z�d�dԄZ��d2d�dY�Z�d�dG�Z��d3d�dm�Z�d�dn�Z�d�dp�Z�e^���dI�Z�en���dO�Z�eo���dN�Z�ep���dg�Z�eq���df�Z�egeGd�d�d܍��d�dބ�Z�ehd߃��d�dބ�Z�ehd���d�dބ�Z�e�e�Be�Bejd�d{d܍BZ�e�e�e�d�e��Z�e`d�e�d���d�e�e}e�e�B����d�d�Z�d�de�Z�d�dS�Z�d�db�Z�d�d`�Z�d�ds�Z�e�d�dބ�Z�e�d�dބ�Z�d�d�Z�d�dQ�Z�d�dR�Z�d�dk�Z�e?�e�_��d4d�dq�Z�e@�Z�e?�e�_�e?�e�_�e�d��e�d��fd�do�Z�e�Z�e�ehd��d����d��Z�e�ehd��d����d��Z�e�ehd��d�ehd��d�B����d�Z�e�ea�d�e�������d�Z�d�d�de���f�ddV�Z��d5�ddl�Z�e��d�Z�e��d�Z�e�egeCeF�d����d��\Z�Z�e�ed	�7��d
��Z�eh�d�d�Heàġ��d
����d�ZŐdda�Z�e�eh�d��d����d�Z�eh�d����d�Z�eh�d��ɡ���d�Z�eh�d����d�Z�e�eh�d��de�B����d�Z�e�Z�eh�d����d�Z�e�e}egeJdːd�e�eg�d�e`d˃eo�����ϡ���d�Z�e�e�e���e�Bd��d����d@�Z�G�d dt�dt�Z�eӐd!k�r�ed�d"�Z�ed�d#�Z�egeCeF�d$�Z�e�e֐d%dՐd&���e��Z�e�e�e׃����d'�Zؐd(e�BZ�e�e֐d%dՐd&���e��Z�e�e�eڃ����d)�Z�eԐd*�eِd'�e�eېd)�Z�eܠݐd+�e�jޠݐd,�e�jߠݐd,�e�j�ݐd-�ddl�Z�e�j᠝e�e�j��e�j�ݐd.�dS(6a�	
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================

The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.

Here is a program to parse "Hello, World!" (or any greeting of the form 
C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements 
(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::

    from pyparsing import Word, alphas

    # define grammar of a greeting
    greet = Word(alphas) + "," + Word(alphas) + "!"

    hello = "Hello, World!"
    print (hello, "->", greet.parseString(hello))

The program outputs the following::

    Hello, World! -> ['Hello', ',', 'World', '!']

The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.

The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
object with named attributes.

The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
 - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)
 - quoted strings
 - embedded comments


Getting Started -
-----------------
Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
 - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
 - construct character word-group expressions using the L{Word} class
 - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
 - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
 - associate names with your parsed results using L{ParserElement.setResultsName}
 - find some helpful expression short-cuts like L{delimitedList} and L{oneOf} (a short sketch follows this list)
 - find more useful common expressions in the L{pyparsing_common} namespace class
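
A quick sketch of two of those short-cuts, C{delimitedList} and C{oneOf} (an
illustrative example, using only documented pyparsing 2.x calls)::

    from pyparsing import Word, nums, delimitedList, oneOf

    int_list = delimitedList(Word(nums))    # comma-delimited integers by default
    units    = oneOf("KB MB GB")            # match any one of a fixed set of literals

    print(int_list.parseString("1, 2, 3"))  # -> ['1', '2', '3']
    print(units.parseString("MB"))          # -> ['MB']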
z2.2.1z18 Sep 2018 00:49 UTCz*Paul McGuire <ptmcg@users.sourceforge.net>�N)�ref)�datetime)�RLock)�Iterable)�MutableMapping)�OrderedDict�And�CaselessKeyword�CaselessLiteral�
CharsNotIn�Combine�Dict�Each�Empty�
FollowedBy�Forward�
GoToColumn�Group�Keyword�LineEnd�	LineStart�Literal�
MatchFirst�NoMatch�NotAny�	OneOrMore�OnlyOnce�Optional�Or�ParseBaseException�ParseElementEnhance�ParseException�ParseExpression�ParseFatalException�ParseResults�ParseSyntaxException�
ParserElement�QuotedString�RecursiveGrammarException�Regex�SkipTo�	StringEnd�StringStart�Suppress�Token�TokenConverter�White�Word�WordEnd�	WordStart�
ZeroOrMore�	alphanums�alphas�
alphas8bit�anyCloseTag�
anyOpenTag�
cStyleComment�col�commaSeparatedList�commonHTMLEntity�countedArray�cppStyleComment�dblQuotedString�dblSlashComment�
delimitedList�dictOf�downcaseTokens�empty�hexnums�htmlComment�javaStyleComment�line�lineEnd�	lineStart�lineno�makeHTMLTags�makeXMLTags�matchOnlyAtCol�matchPreviousExpr�matchPreviousLiteral�
nestedExpr�nullDebugAction�nums�oneOf�opAssoc�operatorPrecedence�
printables�punc8bit�pythonStyleComment�quotedString�removeQuotes�replaceHTMLEntity�replaceWith�
restOfLine�sglQuotedString�srange�	stringEnd�stringStart�traceParseAction�
unicodeString�upcaseTokens�
withAttribute�
indentedBlock�originalTextFor�ungroup�
infixNotation�locatedExpr�	withClass�
CloseMatch�tokenMap�pyparsing_common�cCsft|t�r|Sz
t|�WStk
r`t|��t��d�}td�}|�dd��|�	|�YSXdS)aDrop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
           then < returns the unicode object | encodes it with the default encoding | ... >.
        �xmlcharrefreplacez&#\d+;cSs$dtt|ddd���dd�S)Nz\ur����)�hex�int��t�ry�C/usr/lib/python3.8/site-packages/pkg_resources/_vendor/pyparsing.py�<lambda>��z_ustr.<locals>.<lambda>N)
�
isinstanceZunicode�str�UnicodeEncodeError�encode�sys�getdefaultencodingr)�setParseAction�transformString)�obj�retZ
xmlcharrefryryrz�_ustr�s

r�z6sum len sorted reversed list tuple set any all min maxccs|]
}|VqdS�Nry)�.0�yryryrz�	<genexpr>�sr��cCs:d}dd�d��D�}t||�D]\}}|�||�}q |S)z/Escape &, <, >, ", ', etc. in a string of data.z&><"'css|]}d|dVqdS)�&�;Nry)r��sryryrzr��sz_xml_escape.<locals>.<genexpr>zamp gt lt quot apos)�split�zip�replace)�dataZfrom_symbolsZ
to_symbolsZfrom_Zto_ryryrz�_xml_escape�s
r�c@seZdZdS)�
_ConstantsN)�__name__�
__module__�__qualname__ryryryrzr��sr��
0123456789ZABCDEFabcdef�\�ccs|]}|tjkr|VqdSr�)�stringZ
whitespace�r��cryryrzr��s
c@sPeZdZdZddd�Zedd��Zdd	�Zd
d�Zdd
�Z	ddd�Z
dd�ZdS)rz7base exception class for all parsing runtime exceptionsrNcCs>||_|dkr||_d|_n||_||_||_|||f|_dS�Nr�)�loc�msg�pstr�
parserElement�args)�selfr�r�r��elemryryrz�__init__�szParseBaseException.__init__cCs||j|j|j|j�S)z�
        internal factory method to simplify creating one type of ParseException 
        from another - avoids having __init__ signature conflicts among subclasses
        )r�r�r�r�)�cls�peryryrz�_from_exception�sz"ParseBaseException._from_exceptioncCsN|dkrt|j|j�S|dkr,t|j|j�S|dkrBt|j|j�St|��dS)z�supported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        rL)r;�columnrIN)rLr�r�r;rI�AttributeError)r�Zanameryryrz�__getattr__�szParseBaseException.__getattr__cCsd|j|j|j|jfS)Nz"%s (at char %d), (line:%d, col:%d))r�r�rLr��r�ryryrz�__str__�s�zParseBaseException.__str__cCst|�Sr��r�r�ryryrz�__repr__�szParseBaseException.__repr__�>!<cCs<|j}|jd}|r4d�|d|�|||d�f�}|��S)z�Extracts the exception line from the input string, and marks
           the location of the exception with a special symbol.
        r�r�N)rIr��join�strip)r�ZmarkerStringZline_strZline_columnryryrz�
markInputline�s

�z ParseBaseException.markInputlinecCsd��tt|��S)Nzlineno col line)r��dir�typer�ryryrz�__dir__szParseBaseException.__dir__)rNN)r�)r�r�r��__doc__r��classmethodr�r�r�r�r�r�ryryryrzr�s



c@seZdZdZdS)r!aN
    Exception thrown when parse expressions don't match class;
    supported attributes by name are:
     - lineno - returns the line number of the exception text
     - col - returns the column number of the exception text
     - line - returns the line containing the exception text
        
    Example::
        try:
            Word(nums).setName("integer").parseString("ABC")
        except ParseException as pe:
            print(pe)
            print("column: {}".format(pe.col))
            
    prints::
       Expected integer (at char 0), (line:1, col:1)
        column: 1
    N�r�r�r�r�ryryryrzr!sc@seZdZdZdS)r#znuser-throwable exception thrown when inconsistent parse content
       is found; stops all parsing immediatelyNr�ryryryrzr#sc@seZdZdZdS)r%z�just like L{ParseFatalException}, but thrown internally when an
       L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop 
       immediately because an unbacktrackable syntax error has been foundNr�ryryryrzr%sc@s eZdZdZdd�Zdd�ZdS)r(zZexception thrown by L{ParserElement.validate} if the grammar could be improperly recursivecCs
||_dSr��ZparseElementTrace�r��parseElementListryryrzr�4sz"RecursiveGrammarException.__init__cCs
d|jS)NzRecursiveGrammarException: %sr�r�ryryrzr�7sz!RecursiveGrammarException.__str__N)r�r�r�r�r�r�ryryryrzr(2sc@s,eZdZdd�Zdd�Zdd�Zdd�Zd	S)
�_ParseResultsWithOffsetcCs||f|_dSr���tup)r�Zp1Zp2ryryrzr�;sz _ParseResultsWithOffset.__init__cCs
|j|Sr�r��r��iryryrz�__getitem__=sz#_ParseResultsWithOffset.__getitem__cCst|jd�S�Nr)�reprr�r�ryryrzr�?sz _ParseResultsWithOffset.__repr__cCs|jd|f|_dSr�r�r�ryryrz�	setOffsetAsz!_ParseResultsWithOffset.setOffsetN)r�r�r�r�r�r�r�ryryryrzr�:sr�c@s�eZdZdZd[dd�Zddddefdd�Zdd	�Zefd
d�Zdd
�Z	dd�Z
dd�Zdd�ZeZ
dd�Zdd�Zdd�Zdd�Zdd�Zer�eZeZeZn$eZeZeZdd�Zd d!�Zd"d#�Zd$d%�Zd&d'�Zd\d(d)�Zd*d+�Zd,d-�Zd.d/�Zd0d1�Z d2d3�Z!d4d5�Z"d6d7�Z#d8d9�Z$d:d;�Z%d<d=�Z&d]d?d@�Z'dAdB�Z(dCdD�Z)dEdF�Z*d^dHdI�Z+dJdK�Z,dLdM�Z-d_dOdP�Z.dQdR�Z/dSdT�Z0dUdV�Z1dWdX�Z2dYdZ�Z3dS)`r$aI
    Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (C{len(results)})
       - by list index (C{results[0], results[1]}, etc.)
       - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})

    Example::
        integer = Word(nums)
        date_str = (integer.setResultsName("year") + '/' 
                        + integer.setResultsName("month") + '/' 
                        + integer.setResultsName("day"))
        # equivalent form:
        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

        # parseString returns a ParseResults object
        result = date_str.parseString("1999/12/31")

        def test(s, fn=repr):
            print("%s -> %s" % (s, fn(eval(s))))
        test("list(result)")
        test("result[0]")
        test("result['month']")
        test("result.day")
        test("'month' in result")
        test("'minutes' in result")
        test("result.dump()", str)
    prints::
        list(result) -> ['1999', '/', '12', '/', '31']
        result[0] -> '1999'
        result['month'] -> '12'
        result.day -> '31'
        'month' in result -> True
        'minutes' in result -> False
        result.dump() -> ['1999', '/', '12', '/', '31']
        - day: 31
        - month: 12
        - year: 1999
    NTcCs"t||�r|St�|�}d|_|S�NT)r}�object�__new__�_ParseResults__doinit)r��toklist�name�asList�modalZretobjryryrzr�ks


zParseResults.__new__c
Csb|jrvd|_d|_d|_i|_||_||_|dkr6g}||t�rP|dd�|_n||t�rft|�|_n|g|_t	�|_
|dk	�r^|�r^|s�d|j|<||t�r�t|�}||_||t
d�ttf�r�|ddgfk�s^||t�r�|g}|�r(||t��rt|��d�||<ntt|d�d�||<|||_n6z|d||<Wn$tttfk
�r\|||<YnXdS)NFrr�)r��_ParseResults__name�_ParseResults__parent�_ParseResults__accumNames�_ParseResults__asList�_ParseResults__modal�list�_ParseResults__toklist�_generatorType�dict�_ParseResults__tokdictrvr�r��
basestringr$r��copy�KeyError�	TypeError�
IndexError)r�r�r�r�r�r}ryryrzr�tsB



$
zParseResults.__init__cCsPt|ttf�r|j|S||jkr4|j|ddStdd�|j|D��SdS)NrtrcSsg|]}|d�qS�rry�r��vryryrz�
<listcomp>�sz,ParseResults.__getitem__.<locals>.<listcomp>)r}rv�slicer�r�r�r$r�ryryrzr��s


zParseResults.__getitem__cCs�||t�r0|j�|t��|g|j|<|d}nD||ttf�rN||j|<|}n&|j�|t��t|d�g|j|<|}||t�r�t|�|_	dSr�)
r�r��getr�rvr�r�r$�wkrefr�)r��kr�r}�subryryrz�__setitem__�s


"
zParseResults.__setitem__c
Cs�t|ttf�r�t|j�}|j|=t|t�rH|dkr:||7}t||d�}tt|�|���}|��|j	�
�D]>\}}|D]0}t|�D]"\}\}}	t||	|	|k�||<q�qxqln|j	|=dS�Nrr�)
r}rvr��lenr�r��range�indices�reverser��items�	enumerater�)
r�r�ZmylenZremovedr��occurrences�jr��value�positionryryrz�__delitem__�s

zParseResults.__delitem__cCs
||jkSr�)r�)r�r�ryryrz�__contains__�szParseResults.__contains__cCs
t|j�Sr�)r�r�r�ryryrz�__len__�r|zParseResults.__len__cCs
|jSr��r�r�ryryrz�__bool__�r|zParseResults.__bool__cCs
t|j�Sr���iterr�r�ryryrz�__iter__�r|zParseResults.__iter__cCst|jddd��S�Nrtr�r�ryryrz�__reversed__�r|zParseResults.__reversed__cCs$t|jd�r|j��St|j�SdS)N�iterkeys)�hasattrr�r�r�r�ryryrz�	_iterkeys�s
zParseResults._iterkeyscs�fdd����D�S)Nc3s|]}�|VqdSr�ry�r�r�r�ryrzr��sz+ParseResults._itervalues.<locals>.<genexpr>�rr�ryr�rz�_itervalues�szParseResults._itervaluescs�fdd����D�S)Nc3s|]}|�|fVqdSr�ryrr�ryrzr��sz*ParseResults._iteritems.<locals>.<genexpr>rr�ryr�rz�
_iteritems�szParseResults._iteritemscCst|���S)zVReturns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).)r�r�r�ryryrz�keys�szParseResults.keyscCst|���S)zXReturns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).)r��
itervaluesr�ryryrz�values�szParseResults.valuescCst|���S)zfReturns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).)r��	iteritemsr�ryryrzr��szParseResults.itemscCs
t|j�S)z�Since keys() returns an iterator, this method is helpful in bypassing
           code that looks for the existence of any defined results names.)�boolr�r�ryryrz�haskeys�szParseResults.haskeyscOs�|s
dg}|��D]*\}}|dkr0|d|f}qtd|��qt|dt�sdt|�dksd|d|kr~|d}||}||=|S|d}|SdS)a�
        Removes and returns item at specified index (default=C{last}).
        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
        argument or an integer argument, it will use C{list} semantics
        and pop tokens from the list of parsed tokens. If passed a 
        non-integer argument (most likely a string), it will use C{dict}
        semantics and pop the corresponding value from any defined 
        results names. A second default return value argument is 
        supported, just as in C{dict.pop()}.

        Example::
            def remove_first(tokens):
                tokens.pop(0)
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']

            label = Word(alphas)
            patt = label("LABEL") + OneOrMore(Word(nums))
            print(patt.parseString("AAB 123 321").dump())

            # Use pop() in a parse action to remove named result (note that corresponding value is not
            # removed from list form of results)
            def remove_LABEL(tokens):
                tokens.pop("LABEL")
                return tokens
            patt.addParseAction(remove_LABEL)
            print(patt.parseString("AAB 123 321").dump())
        prints::
            ['AAB', '123', '321']
            - LABEL: AAB

            ['AAB', '123', '321']
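# Illustrative, standalone sketch (assumes ``pyparsing`` is importable): pop()
# with list semantics (integer index, or no argument for the last token) and
# dict semantics (results name, with an optional default as in dict.pop()).
from pyparsing import OneOrMore, Word, alphas, nums

patt = Word(alphas)("label") + OneOrMore(Word(nums))
result = patt.parseString("AAB 123 321")

print(result.pop())                # '321' - no argument pops the last token
print(result.pop(0))               # 'AAB' - integer index pops from the token list
print(result.pop("label"))         # 'AAB' - string key pops a named result
print(result.pop("missing", "-"))  # '-'   - second argument is a default value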
        rt�defaultrz-pop() got an unexpected keyword argument '%s'r�N)r�r�r}rvr�)r�r��kwargsr�r��indexr�Zdefaultvalueryryrz�pop�s""
�
�zParseResults.popcCs||kr||S|SdS)ai
        Returns named result matching the given key, or if there is no
        such name, then returns the given C{defaultValue} or C{None} if no
        C{defaultValue} is specified.

        Similar to C{dict.get()}.
        
        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            result = date_str.parseString("1999/12/31")
            print(result.get("year")) # -> '1999'
            print(result.get("hour", "not specified")) # -> 'not specified'
            print(result.get("hour")) # -> None
        Nry)r��key�defaultValueryryrzr�3szParseResults.getcCsR|j�||�|j��D]4\}}t|�D]"\}\}}t||||k�||<q(qdS)a
        Inserts new element at location index in the list of parsed tokens.
        
        Similar to C{list.insert()}.

        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']

            # use a parse action to insert the parse location in the front of the parsed results
            def insert_locn(locn, tokens):
                tokens.insert(0, locn)
            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
        N)r��insertr�r�r�r�)r�r
ZinsStrr�r�r�r�r�ryryrzrIszParseResults.insertcCs|j�|�dS)a�
        Add single element to end of ParseResults list of elements.

        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            
            # use a parse action to compute the sum of the parsed integers, and add it to the end
            def append_sum(tokens):
                tokens.append(sum(map(int, tokens)))
            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
        N)r��append)r��itemryryrzr]szParseResults.appendcCs$t|t�r||7}n|j�|�dS)a
        Add sequence of elements to end of ParseResults list of elements.

        Example::
            patt = OneOrMore(Word(alphas))
            
            # use a parse action to append the reverse of the matched strings, to make a palindrome
            def make_palindrome(tokens):
                tokens.extend(reversed([t[::-1] for t in tokens]))
                return ''.join(tokens)
            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
        N)r}r$r��extend)r�Zitemseqryryrzrks

zParseResults.extendcCs|jdd�=|j��dS)z7
        Clear all elements and results names.
        N)r�r��clearr�ryryrzr}szParseResults.clearcCsjz
||WStk
r YdSX||jkrb||jkrH|j|ddStdd�|j|D��SndSdS)Nr�rtrcSsg|]}|d�qSr�ryr�ryryrzr��sz,ParseResults.__getattr__.<locals>.<listcomp>)r�r�r�r$�r�r�ryryrzr��s


zParseResults.__getattr__cCs|��}||7}|Sr��r�)r��otherr�ryryrz�__add__�szParseResults.__add__cs�|jrjt|j���fdd��|j��}�fdd�|D�}|D],\}}|||<t|dt�r<t|�|d_q<|j|j7_|j�	|j�|S)Ncs|dkr�S|�Sr�ry)�a)�offsetryrzr{�r|z'ParseResults.__iadd__.<locals>.<lambda>c	s4g|],\}}|D]}|t|d�|d��f�qqS�rr�)r��r�r��vlistr�)�	addoffsetryrzr��s�z)ParseResults.__iadd__.<locals>.<listcomp>r)
r�r�r�r�r}r$r�r�r��update)r�rZ
otheritemsZotherdictitemsr�r�ry)rrrz�__iadd__�s


�zParseResults.__iadd__cCs&t|t�r|dkr|��S||SdSr�)r}rvr��r�rryryrz�__radd__�szParseResults.__radd__cCsdt|j�t|j�fS)Nz(%s, %s))r�r�r�r�ryryrzr��szParseResults.__repr__cCsdd�dd�|jD��dS)N�[�, css(|] }t|t�rt|�nt|�VqdSr�)r}r$r�r��r�r�ryryrzr��sz'ParseResults.__str__.<locals>.<genexpr>�])r�r�r�ryryrzr��szParseResults.__str__r�cCsLg}|jD]<}|r |r |�|�t|t�r8||��7}q
|�t|��q
|Sr�)r�rr}r$�
_asStringListr�)r��sep�outrryryrzr(�s


zParseResults._asStringListcCsdd�|jD�S)a�
        Returns the parse results as a nested list of matching tokens, all converted to strings.

        Example::
            patt = OneOrMore(Word(alphas))
            result = patt.parseString("sldkj lsdkj sldkj")
            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
            
            # Use asList() to create an actual list
            result_list = result.asList()
            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
        cSs"g|]}t|t�r|��n|�qSry)r}r$r�)r��resryryrzr��sz'ParseResults.asList.<locals>.<listcomp>r�r�ryryrzr��szParseResults.asListcs6tr|j}n|j}�fdd��t�fdd�|�D��S)a�
        Returns the named parse results as a nested dictionary.

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            
            result = date_str.parseString('12/31/1999')
            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
            
            result_dict = result.asDict()
            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}

            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
            import json
            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
        cs6t|t�r.|��r|��S�fdd�|D�Sn|SdS)Ncsg|]}�|��qSryryr���toItemryrzr��sz7ParseResults.asDict.<locals>.toItem.<locals>.<listcomp>)r}r$r
�asDict)r�r,ryrzr-�s

z#ParseResults.asDict.<locals>.toItemc3s|]\}}|�|�fVqdSr�ry�r�r�r�r,ryrzr��sz&ParseResults.asDict.<locals>.<genexpr>)�PY_3r�rr�)r�Zitem_fnryr,rzr.�s
	zParseResults.asDictcCs8t|j�}|j��|_|j|_|j�|j�|j|_|S)zA
        Returns a new copy of a C{ParseResults} object.
        )r$r�r�r�r�r�r r��r�r�ryryrzr��s
zParseResults.copyFcCsLd}g}tdd�|j��D��}|d}|s8d}d}d}d}	|dk	rJ|}	n|jrV|j}	|	sf|rbdSd}	|||d|	d	g7}t|j�D]�\}
}t|t�r�|
|kr�||�||
|o�|dk||�g7}n||�d|o�|dk||�g7}q�d}|
|kr�||
}|�s|�rq�nd}t	t
|��}
|||d|d	|
d
|d	g	7}q�|||d
|	d	g7}d�|�S)z�
        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
        �
css(|] \}}|D]}|d|fVqqdS�r�Nryrryryrzr�s�z%ParseResults.asXML.<locals>.<genexpr>�  r�NZITEM�<�>�</)r�r�r�r�r�r�r}r$�asXMLr�r�r�)r�ZdoctagZnamedItemsOnly�indentZ	formatted�nlr*Z
namedItemsZnextLevelIndentZselfTagr�r+ZresTagZxmlBodyTextryryrzr8�s^

�

�
�zParseResults.asXMLcCs:|j��D]*\}}|D]\}}||kr|Sqq
dSr�)r�r�)r�r�r�rr�r�ryryrzZ__lookup;s
zParseResults.__lookupcCs�|jr|jS|jr.|��}|r(|�|�SdSnNt|�dkrxt|j�dkrxtt|j����dddkrxtt|j����SdSdS)a(
        Returns the results name for this token expression. Useful when several 
        different expressions might match at a particular location.

        Example::
            integer = Word(nums)
            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
            house_number_expr = Suppress('#') + Word(nums, alphanums)
            user_data = (Group(house_number_expr)("house_number") 
                        | Group(ssn_expr)("ssn")
                        | Group(integer)("age"))
            user_info = OneOrMore(user_data)
            
            result = user_info.parseString("22 111-22-3333 #221B")
            for item in result:
                print(item.getName(), ':', item[0])
        prints::
            age : 22
            ssn : 111-22-3333
            house_number : 221B
        Nr�r)rrt)	r�r��_ParseResults__lookupr�r��nextr�rr)r��parryryrz�getNameBs
��zParseResults.getNamercCsZg}d}|�|t|����|�rP|��r�tdd�|��D��}|D]r\}}|r\|�|�|�d|d||f�t|t�r�|r�|�|�||d��q�|�t|��qF|�t	|��qFn�t
dd�|D���rP|}t|�D]r\}	}
t|
t��r$|�d|d||	|d|d|
�||d�f�q�|�d|d||	|d|dt|
�f�q�d	�|�S)
aH
        Diagnostic method for listing out the contents of a C{ParseResults}.
        Accepts an optional C{indent} argument so that this string can be embedded
        in a nested display of other data.

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            
            result = date_str.parseString('12/31/1999')
            print(result.dump())
        prints::
            ['12', '/', '31', '/', '1999']
            - day: 1999
            - month: 31
            - year: 12
        r2css|]\}}t|�|fVqdSr�)r~r/ryryrzr�~sz$ParseResults.dump.<locals>.<genexpr>z
%s%s- %s: r4r�css|]}t|t�VqdSr�)r}r$)r��vvryryrzr��sz
%s%s[%d]:
%s%s%sr�)
rr�r�r
�sortedr�r}r$�dumpr��anyr�r�)r�r9�depth�fullr*�NLr�r�r�r�r?ryryrzrAgs,

4,zParseResults.dumpcOstj|��f|�|�dS)a�
        Pretty-printer for parsed results as a list, using the C{pprint} module.
        Accepts additional positional or keyword args as defined for the 
        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})

        Example::
            ident = Word(alphas, alphanums)
            num = Word(nums)
            func = Forward()
            term = ident | num | Group('(' + func + ')')
            func <<= ident + Group(Optional(delimitedList(term)))
            result = func.parseString("fna a,b,(fnb c,d,200),100")
            result.pprint(width=40)
        prints::
            ['fna',
             ['a',
              'b',
              ['(', 'fnb', ['c', 'd', '200'], ')'],
              '100']]
        N)�pprintr��r�r�rryryrzrF�szParseResults.pprintcCs.|j|j��|jdk	r|��p d|j|jffSr�)r�r�r�r�r�r�r�ryryrz�__getstate__�s��zParseResults.__getstate__cCsN|d|_|d\|_}}|_i|_|j�|�|dk	rDt|�|_nd|_dSr�)r�r�r�r�r r�r�)r��stater=ZinAccumNamesryryrz�__setstate__�s
�zParseResults.__setstate__cCs|j|j|j|jfSr�)r�r�r�r�r�ryryrz�__getnewargs__�szParseResults.__getnewargs__cCstt|��t|���Sr�)r�r�r�rr�ryryrzr��szParseResults.__dir__)NNTT)N)r�)NFr�T)r�rT)4r�r�r�r�r�r}r�r�r�r�r�r�r��__nonzero__r�r�rrrr0rrr�r�rrr
rr�rrrrr�rr!r#r�r�r(r�r.r�r8r;r>rArFrHrJrKr�ryryryrzr$Dsh&
	'	
4

#
=%
-
cCsF|}d|krt|�kr4nn||ddkr4dS||�dd|�S)aReturns current column within a string, counting newlines as line separators.
   The first column is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   rr�r2)r��rfind)r��strgr�ryryrzr;�s
cCs|�dd|�dS)aReturns current line number within a string, counting newlines as line separators.
   The first line is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   r2rr�)�count)r�rNryryrzrL�s
cCsF|�dd|�}|�d|�}|dkr2||d|�S||dd�SdS)zfReturns the line of text containing loc within a string, counting newlines as line separators.
       r2rr�N)rM�find)r�rNZlastCRZnextCRryryrzrI�s
cCs8tdt|�dt|�dt||�t||�f�dS)NzMatch z at loc z(%d,%d))�printr�rLr;)�instringr��exprryryrz�_defaultStartDebugAction�srTcCs$tdt|�dt|����dS)NzMatched z -> )rQr�r~r�)rR�startlocZendlocrS�toksryryrz�_defaultSuccessDebugAction�srWcCstdt|��dS)NzException raised:)rQr�)rRr�rS�excryryrz�_defaultExceptionDebugAction�srYcGsdS)zG'Do-nothing' debug action, to suppress debugging output during parsing.Nry)r�ryryrzrS�srscs��tkr�fdd�Sdg�dg�tdd�dkrFddd�}dd	d
��ntj}tj�d}|dd�d
}|d|d|f�������fdd�}d}zt�dt�d�j�}Wntk
r�t��}YnX||_|S)Ncs�|�Sr�ry�r��lrx)�funcryrzr{r|z_trim_arity.<locals>.<lambda>rFrs)rq�cSs8tdkrdnd}tj||dd�|}|dd�gS)N)rqr]r������r���limitrs)�system_version�	traceback�
extract_stack)rar�
frame_summaryryryrzrdsz"_trim_arity.<locals>.extract_stackcSs$tj||d�}|d}|dd�gS)Nr`rtrs)rc�
extract_tb)�tbraZframesreryryrzrfsz_trim_arity.<locals>.extract_tb�r`rtr�c	s�z"�|�dd��}d�d<|WStk
r��dr>�n4z.t��d}�|dd�ddd��ksj�W5~X�d�kr��dd7<Yq�YqXqdS)NrTrtrsr`r�)r�r��exc_info)r�r�rg�rfZ
foundArityr\ra�maxargsZpa_call_line_synthryrz�wrapper-s z_trim_arity.<locals>.wrapperz<parse action>r��	__class__)r)r)	�singleArgBuiltinsrbrcrdrf�getattrr��	Exceptionr~)r\rkrdZ	LINE_DIFFZ	this_linerl�	func_nameryrjrz�_trim_aritys,

�rrcs�eZdZdZdZdZedd��Zedd��Zd�dd	�Z	d
d�Z
dd
�Zd�dd�Zd�dd�Z
dd�Zdd�Zdd�Zdd�Zdd�Zdd�Zd�dd �Zd!d"�Zd�d#d$�Zd%d&�Zd'd(�ZGd)d*�d*e�Zed+k	r�Gd,d-�d-e�ZnGd.d-�d-e�ZiZe�Zd/d/gZ d�d0d1�Z!eZ"ed2d3��Z#dZ$ed�d5d6��Z%d�d7d8�Z&e'dfd9d:�Z(d;d<�Z)e'fd=d>�Z*e'dfd?d@�Z+dAdB�Z,dCdD�Z-dEdF�Z.dGdH�Z/dIdJ�Z0dKdL�Z1dMdN�Z2dOdP�Z3dQdR�Z4dSdT�Z5dUdV�Z6dWdX�Z7dYdZ�Z8d�d[d\�Z9d]d^�Z:d_d`�Z;dadb�Z<dcdd�Z=dedf�Z>dgdh�Z?d�didj�Z@dkdl�ZAdmdn�ZBdodp�ZCdqdr�ZDgfdsdt�ZEd�dudv�ZF�fdwdx�ZGdydz�ZHd{d|�ZId}d~�ZJdd��ZKd�d�d��ZLd�d�d��ZM�ZNS)�r&z)Abstract base level parser element class.z 
	
FcCs
|t_dS)a�
        Overrides the default whitespace chars

        Example::
            # default whitespace chars are space, <TAB> and newline
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
            
            # change to just treat newline as significant
            ParserElement.setDefaultWhitespaceChars(" \t")
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
        N)r&�DEFAULT_WHITE_CHARS��charsryryrz�setDefaultWhitespaceCharsTs
z'ParserElement.setDefaultWhitespaceCharscCs
|t_dS)a�
        Set class to be used for inclusion of string literals into a parser.
        
        Example::
            # default literal class used is Literal
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']


            # change to Suppress
            ParserElement.inlineLiteralsUsing(Suppress)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
        N)r&�_literalStringClass)r�ryryrz�inlineLiteralsUsingcsz!ParserElement.inlineLiteralsUsingcCs�t�|_d|_d|_d|_||_d|_tj|_	d|_
d|_d|_t�|_
d|_d|_d|_d|_d|_d|_d|_d|_d|_dS)NTFr�)NNN)r��parseAction�
failAction�strRepr�resultsName�
saveAsList�skipWhitespacer&rs�
whiteChars�copyDefaultWhiteChars�mayReturnEmpty�keepTabs�ignoreExprs�debug�streamlined�
mayIndexError�errmsg�modalResults�debugActions�re�callPreparse�
callDuringTry)r��savelistryryrzr�xs(zParserElement.__init__cCs<t�|�}|jdd�|_|jdd�|_|jr8tj|_|S)a$
        Make a copy of this C{ParserElement}.  Useful for defining different parse actions
        for the same parsing pattern, using copies of the original parse element.
        
        Example::
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
            
            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
        prints::
            [5120, 100, 655360, 268435456]
        Equivalent form of C{expr.copy()} is just C{expr()}::
            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
        N)r�ryr�r�r&rsr)r�Zcpyryryrzr��s
zParserElement.copycCs*||_d|j|_t|d�r&|j|j_|S)af
        Define name for this expression, makes debugging and exception messages clearer.
        
        Example::
            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
        �	Expected �	exception)r�r�r�r�r�rryryrz�setName�s


zParserElement.setNamecCs4|��}|�d�r"|dd�}d}||_||_|S)aP
        Define name for referencing matching tokens as a nested attribute
        of the returned parse results.
        NOTE: this returns a *copy* of the original C{ParserElement} object;
        this is so that the client can define a basic element, such as an
        integer, and reference it in multiple places with different names.

        You can also set results names using the abbreviated syntax,
        C{expr("name")} in place of C{expr.setResultsName("name")} - 
        see L{I{__call__}<__call__>}.

        Example::
            date_str = (integer.setResultsName("year") + '/' 
                        + integer.setResultsName("month") + '/' 
                        + integer.setResultsName("day"))

            # equivalent form:
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
        �*NrtT)r��endswithr|r�)r�r��listAllMatchesZnewselfryryrz�setResultsName�s
zParserElement.setResultsNameTcs@|r&|j�d�fdd�	}�|_||_nt|jd�r<|jj|_|S)z�Method to invoke the Python pdb debugger when this element is
           about to be parsed. Set C{breakFlag} to True to enable, False to
           disable.
        Tcsddl}|���||||�Sr�)�pdbZ	set_trace)rRr��	doActions�callPreParser��Z_parseMethodryrz�breaker�sz'ParserElement.setBreak.<locals>.breaker�_originalParseMethod)TT)�_parser�r�)r�Z	breakFlagr�ryr�rz�setBreak�s
zParserElement.setBreakcOs&tttt|���|_|�dd�|_|S)a
        Define one or more actions to perform when successfully matching parse element definition.
        Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
        C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
         - s   = the original string being parsed (see note below)
         - loc = the location of the matching substring
         - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
        If the functions in fns modify the tokens, they can return them as the return
        value from fn, and the modified list of tokens will replace the original.
        Otherwise, fn does not need to return any value.

        Optional keyword arguments:
         - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing

        Note: the default parsing behavior is to expand tabs in the input string
        before starting the parsing process.  See L{I{parseString}<parseString>} for more information
        on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
        consistent view of the parsed string, the parse location, and line and column
        positions within the parsed string.
        
        Example::
            integer = Word(nums)
            date_str = integer + '/' + integer + '/' + integer

            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']

            # use parse action to convert to ints at parse time
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            date_str = integer + '/' + integer + '/' + integer

            # note that integer fields are now ints, not strings
            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
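# Illustrative, standalone sketch (assumes ``pyparsing`` is importable): a parse
# action using the full (s, loc, toks) signature together with the module-level
# lineno()/col() helpers to tag each match with its position.
from pyparsing import OneOrMore, Word, col, lineno, nums

def tag_location(s, loc, toks):
    # s is the original string, loc the match location, toks the matched tokens
    return "%s@(%d,%d)" % (toks[0], lineno(loc, s), col(loc, s))

integer = Word(nums).setParseAction(tag_location)
print(OneOrMore(integer).parseString("11 22\n33"))
# -> ['11@(1,1)', '22@(1,4)', '33@(2,1)']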
        r�F)r��maprrryr�r��r��fnsrryryrzr��s"zParserElement.setParseActioncOs4|jtttt|���7_|jp,|�dd�|_|S)z�
        Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
        
        See examples in L{I{copy}<copy>}.
        r�F)ryr�r�rrr�r�r�ryryrz�addParseActionszParserElement.addParseActioncs^|�dd��|�dd�rtnt�|D] ����fdd�}|j�|�q$|jpV|�dd�|_|S)a�Add a boolean predicate function to expression's list of parse actions. See 
        L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, 
        functions passed to C{addCondition} need to return boolean success/fail of the condition.

        Optional keyword arguments:
         - message = define a custom message to be used in the raised exception
         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
         
        Example::
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            year_int = integer.copy()
            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
            date_str = year_int + '/' + integer + '/' + integer

            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
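# Illustrative, standalone sketch (assumes ``pyparsing`` is importable): with
# fatal=True a failed condition raises ParseFatalException, stopping the parse
# immediately instead of allowing alternatives to backtrack.
from pyparsing import ParseFatalException, Word, nums

year = Word(nums).setParseAction(lambda toks: int(toks[0]))
year.addCondition(lambda toks: toks[0] >= 2000,
                  message="only years 2000 and later are supported",
                  fatal=True)
try:
    year.parseString("1999")
except ParseFatalException as pfe:
    print("fatal:", pfe)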
        �messagezfailed user-defined condition�fatalFcs$tt��|||��s �||���dSr�)r	rrrZ��exc_type�fnr�ryrz�pa&sz&ParserElement.addCondition.<locals>.par�)r�r#r!ryrr�)r�r�rr�ryr�rz�addConditionszParserElement.addConditioncCs
||_|S)aDefine action to perform if parsing fails at this expression.
           Fail action fn is a callable function that takes the arguments
           C{fn(s,loc,expr,err)} where:
            - s = string being parsed
            - loc = location where expression match was attempted and failed
            - expr = the parse expression that failed
            - err = the exception thrown
           The function returns no value.  It may throw C{L{ParseFatalException}}
           if it is desired to stop parsing immediately.)rz)r�r�ryryrz�
setFailAction-s
zParserElement.setFailActionc	CsNd}|rJd}|jD]4}z|�||�\}}d}qWqtk
rDYqXqq|S�NTF)r�r�r!)r�rRr�Z
exprsFound�eZdummyryryrz�_skipIgnorables:s


zParserElement._skipIgnorablescCsH|jr|�||�}|jrD|j}t|�}||krD|||krD|d7}q&|S�Nr�)r�r�r~rr�)r�rRr�Zwt�instrlenryryrz�preParseGs
zParserElement.preParsecCs|gfSr�ry�r�rRr�r�ryryrz�	parseImplSszParserElement.parseImplcCs|Sr�ry�r�rRr��	tokenlistryryrz�	postParseVszParserElement.postParsec
Cs�|j}|s|jr�|jdr,|jd|||�|rD|jrD|�||�}n|}|}zDz|�|||�\}}Wn(tk
r�t|t|�|j	|��YnXWnXt
k
r�}	z:|jdr�|jd||||	�|jr�|�||||	��W5d}	~	XYnXn�|�r|j�r|�||�}n|}|}|j�s&|t|�k�rjz|�|||�\}}Wn*tk
�rft|t|�|j	|��YnXn|�|||�\}}|�|||�}t
||j|j|jd�}
|j�r�|�s�|j�r�|�rTzN|jD]B}||||
�}|dk	�r�t
||j|j�o�t|t
tf�|jd�}
�q�WnFt
k
�rP}	z&|jd�r>|jd||||	��W5d}	~	XYnXnJ|jD]B}||||
�}|dk	�rZt
||j|j�o�t|t
tf�|jd�}
�qZ|�r�|jd�r�|jd|||||
�||
fS)Nrrs)r�r�r�)r�rzr�r�r�r�r�r!r�r�rr�r�r$r|r}r�ryr�r}r�)r�rRr�r�r�Z	debugging�prelocZtokensStart�tokens�errZ	retTokensr�ryryrz�
_parseNoCacheZst





�

�
zParserElement._parseNoCachecCs@z|j||dd�dWStk
r:t|||j|��YnXdS)NF)r�r)r�r#r!r��r�rRr�ryryrz�tryParse�szParserElement.tryParsec	Cs4z|�||�Wnttfk
r*YdSXdSdS)NFT)r�r!r�r�ryryrz�canParseNext�s
zParserElement.canParseNextc@seZdZdd�ZdS)zParserElement._UnboundedCachecs~i�t�|_���fdd�}�fdd�}�fdd�}�fdd�}t�||�|_t�||�|_t�||�|_t�||�|_dS)	Ncs��|��Sr��r��r�r��cache�not_in_cacheryrzr��sz3ParserElement._UnboundedCache.__init__.<locals>.getcs|�|<dSr�ry�r�rr��r�ryrz�set�sz3ParserElement._UnboundedCache.__init__.<locals>.setcs���dSr��rr�r�ryrzr�sz5ParserElement._UnboundedCache.__init__.<locals>.clearcst��Sr��r�r�r�ryrz�	cache_len�sz9ParserElement._UnboundedCache.__init__.<locals>.cache_len)r�r��types�
MethodTyper�r�rr�)r�r�r�rr�ryr�rzr��sz&ParserElement._UnboundedCache.__init__N�r�r�r�r�ryryryrz�_UnboundedCache�sr�Nc@seZdZdd�ZdS)�ParserElement._FifoCachecs�t�|_�t����fdd�}��fdd�}�fdd�}�fdd�}t�||�|_t�||�|_t�||�|_t�||�|_dS)	Ncs��|��Sr�r�r�r�ryrzr��s�.ParserElement._FifoCache.__init__.<locals>.getcs>|�|<t���kr:z��d�Wqtk
r6YqXqdS�NF)r��popitemr�r�)r��sizeryrzr��s�.ParserElement._FifoCache.__init__.<locals>.setcs���dSr�r�r�r�ryrzr�s�0ParserElement._FifoCache.__init__.<locals>.clearcst��Sr�r�r�r�ryrzr��s�4ParserElement._FifoCache.__init__.<locals>.cache_len)	r�r��_OrderedDictr�r�r�r�rr��r�r�r�r�rr�ry)r�r�r�rzr��s�!ParserElement._FifoCache.__init__Nr�ryryryrz�
_FifoCache�sr�c@seZdZdd�ZdS)r�cs�t�|_�i�t�g�����fdd�}���fdd�}��fdd�}�fdd�}t�||�|_t�||�|_t�||�|_t�||�|_	dS)	Ncs��|��Sr�r�r�r�ryrzr��sr�cs4|�|<t���kr&�����d�q��|�dSr�)r�r�popleftrr�)r��key_fifor�ryrzr��sr�cs������dSr�r�r�)r�r�ryrzr�sr�cst��Sr�r�r�r�ryrzr��sr�)
r�r��collections�dequer�r�r�r�rr�r�ry)r�r�r�r�rzr��sr�Nr�ryryryrzr��srcCsd\}}|||||f}tj��tj}|�|�}	|	|jkr�tj|d7<z|�||||�}	Wn8tk
r�}
z|�||
j	|
j
���W5d}
~
XYn.X|�||	d|	d��f�|	W5QR�Sn@tj|d7<t|	t
�r�|	�|	d|	d��fW5QR�SW5QRXdS)Nrr�r)r&�packrat_cache_lock�
packrat_cacher�r��packrat_cache_statsr�rr�rmr�r�r}rp)r�rRr�r�r�ZHITZMISS�lookupr�r�r�ryryrz�_parseCaches$


zParserElement._parseCachecCs(tj��dgttj�tjdd�<dSr�)r&r�rr�r�ryryryrz�
resetCaches
zParserElement.resetCache�cCs8tjs4dt_|dkr t��t_nt�|�t_tjt_dS)a�Enables "packrat" parsing, which adds memoizing to the parsing logic.
           Repeated parse attempts at the same string location (which happens
           often in many complex grammars) can immediately return a cached value,
           instead of re-executing parsing/validating code.  Memoizing is done for
           both valid results and parsing exceptions.
           
           Parameters:
            - cache_size_limit - (default=C{128}) - if an integer value is provided
              will limit the size of the packrat cache; if None is passed, then
              the cache size will be unbounded; if 0 is passed, the cache will
              be effectively disabled.
            
           This speedup may break existing programs that use parse actions that
           have side-effects.  For this reason, packrat parsing is disabled when
           you first import pyparsing.  To activate the packrat feature, your
           program must call the class method C{ParserElement.enablePackrat()}.  If
           your program uses C{psyco} to "compile as you go", you must call
           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
           Python will crash.  For best results, call C{enablePackrat()} immediately
           after importing pyparsing.
           
           Example::
               import pyparsing
               pyparsing.ParserElement.enablePackrat()
        TN)r&�_packratEnabledr�r�r�r�r�)Zcache_size_limitryryrz�
enablePackrat%szParserElement.enablePackratc
Cs�t��|js|��|jD]}|��q|js8|��}z<|�|d�\}}|rr|�||�}t	�t
�}|�||�Wn0tk
r�}ztjr��n|�W5d}~XYnX|SdS)aB
        Execute the parse expression with the given string.
        This is the main interface to the client code, once the complete
        expression has been built.

        If you want the grammar to require that the entire input string be
        successfully parsed, then set C{parseAll} to True (equivalent to ending
        the grammar with C{L{StringEnd()}}).

        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
        in order to report proper column numbers in parse actions.
        If the input string contains tabs and
        the grammar uses parse actions that use the C{loc} argument to index into the
        string being parsed, you can ensure you have a consistent view of the input
        string by:
         - calling C{parseWithTabs} on your grammar before calling C{parseString}
           (see L{I{parseWithTabs}<parseWithTabs>})
         - define your parse action using the full C{(s,loc,toks)} signature, and
           reference the input string using the parse action's C{s} argument
         - explictly expand the tabs in your input string before calling
           C{parseString}
        
        Example::
            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
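# Illustrative, standalone sketch (assumes ``pyparsing`` is importable):
# parseAll=True requires the whole input to be consumed, and failures surface
# as ParseException with location information.
from pyparsing import ParseException, Word, nums

integer = Word(nums)
print(integer.parseString("123abc"))          # ['123'] - trailing text is ignored
try:
    integer.parseString("123abc", parseAll=True)
except ParseException as pe:
    print(pe.lineno, pe.col, pe.msg)          # 1 4 Expected end of text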
        rN)
r&r�r��
streamliner�r��
expandtabsr�r�rr+r�verbose_stacktrace)r�rR�parseAllr�r�r�ZserXryryrz�parseStringHs$

zParserElement.parseStringc
cs6|js|��|jD]}|��q|js4t|���}t|�}d}|j}|j}t	�
�d}	z�||kr�|	|kr�z |||�}
|||
dd�\}}Wntk
r�|
d}YqZX||kr�|	d7}	||
|fV|r�|||�}
|
|kr�|}q�|d7}q�|}qZ|
d}qZWn4tk
�r0}zt	j
�r�n|�W5d}~XYnXdS)a�
        Scan the input string for expression matches.  Each match will return the
        matching tokens, start location, and end location.  May be called with optional
        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
        C{overlap} is specified, then overlapping matches will be reported.

        Note that the start and end locations are reported relative to the string
        being parsed.  See L{I{parseString}<parseString>} for more information on parsing
        strings with embedded tabs.

        Example::
            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
            print(source)
            for tokens,start,end in Word(alphas).scanString(source):
                print(' '*start + '^'*(end-start))
                print(' '*start + tokens[0])
        
        prints::
        
            sldjf123lsdjjkf345sldkjf879lkjsfd987
            ^^^^^
            sldjf
                    ^^^^^^^
                    lsdjjkf
                              ^^^^^^
                              sldkjf
                                       ^^^^^^
                                       lkjsfd
        rF�r�r�N)r�r�r�r�r�r�r�r�r�r&r�r!rr�)r�rR�
maxMatchesZoverlapr�r�r�Z
preparseFnZparseFn�matchesr�ZnextLocr�ZnextlocrXryryrz�
scanStringzsB




zParserElement.scanStringc
Cs�g}d}d|_z�|�|�D]Z\}}}|�|||��|rpt|t�rR||��7}nt|t�rf||7}n
|�|�|}q|�||d��dd�|D�}d�tt	t
|���WStk
r�}ztj
rƂn|�W5d}~XYnXdS)af
        Extension to C{L{scanString}}, to modify matching text with modified tokens that may
        be returned from a parse action.  To use C{transformString}, define a grammar and
        attach a parse action to it that modifies the returned token list.
        Invoking C{transformString()} on a target string will then scan for matches,
        and replace the matched text patterns according to the logic in the parse
        action.  C{transformString()} returns the resulting transformed string.
        
        Example::
            wd = Word(alphas)
            wd.setParseAction(lambda toks: toks[0].title())
            
            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
        Prints::
            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
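# Illustrative, standalone sketch (assumes ``pyparsing`` is importable): a parse
# action's return value replaces the matched text, so transformString() can
# redact or rewrite matches in place.
from pyparsing import Word, nums

number = Word(nums).setParseAction(lambda toks: "#" * len(toks[0]))
print(number.transformString("order 1234 shipped to box 56"))
# -> 'order #### shipped to box ##'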
        rTNcSsg|]}|r|�qSryry)r��oryryrzr��sz1ParserElement.transformString.<locals>.<listcomp>r�)r�r�rr}r$r�r�r�r�r��_flattenrr&r�)r�rRr*ZlastErxr�r�rXryryrzr��s(



zParserElement.transformStringc
CsRztdd�|�||�D��WStk
rL}ztjr8�n|�W5d}~XYnXdS)a�
        Another extension to C{L{scanString}}, simplifying the access to the tokens found
        to match the given parse expression.  May be called with optional
        C{maxMatches} argument, to clip searching after 'n' matches are found.
        
        Example::
            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
            cap_word = Word(alphas.upper(), alphas.lower())
            
            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))

            # the sum() builtin can be used to merge results into a single ParseResults object
            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
        prints::
            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
        cSsg|]\}}}|�qSryry)r�rxr�r�ryryrzr��sz.ParserElement.searchString.<locals>.<listcomp>N)r$r�rr&r�)r�rRr�rXryryrz�searchString�szParserElement.searchStringc	csTd}d}|j||d�D]*\}}}|||�V|r<|dV|}q||d�VdS)a[
        Generator method to split a string using the given expression as a separator.
        May be called with optional C{maxsplit} argument, to limit the number of splits;
        and the optional C{includeSeparators} argument (default=C{False}), if the separating
        matching text should be included in the split results.
        
        Example::        
            punc = oneOf(list(".,;:/-!?"))
            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
        prints::
            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
        r)r�N)r�)	r�rR�maxsplitZincludeSeparatorsZsplitsZlastrxr�r�ryryrzr�s

zParserElement.splitcCsFt|t�rt�|�}t|t�s:tjdt|�tdd�dSt||g�S)a�
        Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
        converts them to L{Literal}s by default.
        
        Example::
            greet = Word(alphas) + "," + Word(alphas) + "!"
            hello = "Hello, World!"
            print (hello, "->", greet.parseString(hello))
        Prints::
            Hello, World! -> ['Hello', ',', 'World', '!']
        �4Cannot combine element of type %s with ParserElementrs��
stacklevelN)	r}r�r&rw�warnings�warnr��
SyntaxWarningrr"ryryrzrs


�zParserElement.__add__cCsBt|t�rt�|�}t|t�s:tjdt|�tdd�dS||S)z]
        Implementation of + operator when left operand is not a C{L{ParserElement}}
        r�rsr�N�r}r�r&rwr�r�r�r�r"ryryrzr#1s


�zParserElement.__radd__cCsJt|t�rt�|�}t|t�s:tjdt|�tdd�dS|t�	�|S)zQ
        Implementation of - operator, returns C{L{And}} with error stop
        r�rsr�N)
r}r�r&rwr�r�r�r�r�
_ErrorStopr"ryryrz�__sub__=s


�zParserElement.__sub__cCsBt|t�rt�|�}t|t�s:tjdt|�tdd�dS||S)z]
        Implementation of - operator when left operand is not a C{L{ParserElement}}
        r�rsr�Nr�r"ryryrz�__rsub__Is


�zParserElement.__rsub__cs�t|t�r|d}}n�t|t�r�|ddd�}|ddkrHd|df}t|dt�r�|ddkr�|ddkrvt��S|ddkr�t��S�|dt��Sq�t|dt�r�t|dt�r�|\}}||8}q�tdt|d�t|d���ntdt|���|dk�rtd��|dk�rtd	��||k�r6dk�rBnntd
��|�r���fdd��|�r�|dk�rt��|�}nt�g|��|�}n�|�}n|dk�r��}nt�g|�}|S)
a�
        Implementation of * operator, allows use of C{expr * 3} in place of
         C{expr + expr + expr}.  Expressions may also be multiplied by a 2-integer
        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
        may also include C{None} as in:
         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
              to C{expr*n + L{ZeroOrMore}(expr)}
              (read as "at least n instances of C{expr}")
         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
              (read as "0 to n instances of C{expr}")
         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}

        Note that C{expr*(None,n)} does not raise an exception if
        more than n exprs exist in the input stream; that is,
        C{expr*(None,n)} does not enforce a maximum number of expr
        occurrences.  If this behavior is desired, then write
        C{expr*(None,n) + ~expr}
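# Illustrative, standalone sketch (assumes ``pyparsing`` is importable) of the
# repetition forms described above.
from pyparsing import Word, nums

integer = Word(nums)
print((integer * 3).parseString("1 2 3"))          # ['1', '2', '3']  exactly three
print((integer * (2, 4)).parseString("1 2 3"))     # ['1', '2', '3']  two to four
print((integer * (2,)).parseString("1 2 3 4 5"))   # ['1', '2', '3', '4', '5']  at least two
print((integer * (None, 2)).parseString("1 2 3"))  # ['1', '2']       at most two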
        r)NNNrsr�z7cannot multiply 'ParserElement' and ('%s','%s') objectsz0cannot multiply 'ParserElement' and '%s' objectsz/cannot multiply ParserElement by negative valuez@second tuple value must be greater or equal to first tuple valuez+cannot multiply ParserElement by 0 or (0,0)cs(|dkrt��|d��St��SdSr�)r��n��makeOptionalListr�ryrzr��sz/ParserElement.__mul__.<locals>.makeOptionalList)	r}rv�tupler4rr�r��
ValueErrorr)r�rZminElementsZoptElementsr�ryr�rz�__mul__UsD







zParserElement.__mul__cCs
|�|�Sr�)rr"ryryrz�__rmul__�szParserElement.__rmul__cCsFt|t�rt�|�}t|t�s:tjdt|�tdd�dSt||g�S)zI
        Implementation of | operator - returns C{L{MatchFirst}}
        r�rsr�N)	r}r�r&rwr�r�r�r�rr"ryryrz�__or__�s


�zParserElement.__or__cCsBt|t�rt�|�}t|t�s:tjdt|�tdd�dS||BS)z]
        Implementation of | operator when left operand is not a C{L{ParserElement}}
        r�rsr�Nr�r"ryryrz�__ror__�s


�zParserElement.__ror__cCsFt|t�rt�|�}t|t�s:tjdt|�tdd�dSt||g�S)zA
        Implementation of ^ operator - returns C{L{Or}}
        r�rsr�N)	r}r�r&rwr�r�r�r�rr"ryryrz�__xor__�s


�zParserElement.__xor__cCsBt|t�rt�|�}t|t�s:tjdt|�tdd�dS||AS)z]
        Implementation of ^ operator when left operand is not a C{L{ParserElement}}
        r�rsr�Nr�r"ryryrz�__rxor__�s


�zParserElement.__rxor__cCsFt|t�rt�|�}t|t�s:tjdt|�tdd�dSt||g�S)zC
        Implementation of & operator - returns C{L{Each}}
        r�rsr�N)	r}r�r&rwr�r�r�r�rr"ryryrz�__and__�s


�zParserElement.__and__cCsBt|t�rt�|�}t|t�s:tjdt|�tdd�dS||@S)z]
        Implementation of & operator when left operand is not a C{L{ParserElement}}
        r�rsr�Nr�r"ryryrz�__rand__�s


�zParserElement.__rand__cCst|�S)zE
        Implementation of ~ operator - returns C{L{NotAny}}
        )rr�ryryrz�
__invert__�szParserElement.__invert__cCs|dk	r|�|�S|��SdS)a

        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
        
        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
        passed as C{True}.
           
        If C{name} is omitted, same as calling C{L{copy}}.

        Example::
            # these are equivalent
            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             
        N)r�r�rryryrz�__call__�s
zParserElement.__call__cCst|�S)z�
        Suppresses the output of this C{ParserElement}; useful to keep punctuation from
        cluttering up returned output.
        )r-r�ryryrz�suppress�szParserElement.suppresscCs
d|_|S)a
        Disables the skipping of whitespace before matching the characters in the
        C{ParserElement}'s defined pattern.  This is normally only used internally by
        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
        F�r~r�ryryrz�leaveWhitespaceszParserElement.leaveWhitespacecCsd|_||_d|_|S)z8
        Overrides the default whitespace chars
        TF)r~rr�)r�ruryryrz�setWhitespaceChars
sz ParserElement.setWhitespaceCharscCs
d|_|S)z�
        Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
        Must be called before C{parseString} when the input grammar contains elements that
        match C{<TAB>} characters.
        T)r�r�ryryrz�
parseWithTabsszParserElement.parseWithTabscCsLt|t�rt|�}t|t�r4||jkrH|j�|�n|j�t|����|S)a�
        Define expression to be ignored (e.g., comments) while doing pattern
        matching; may be called repeatedly, to define multiple comment or other
        ignorable patterns.
        
        Example::
            patt = OneOrMore(Word(alphas))
            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
            
            patt.ignore(cStyleComment)
            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
        )r}r�r-r�rr�r"ryryrz�ignores


zParserElement.ignorecCs"|pt|pt|ptf|_d|_|S)zT
        Enable display of debugging messages while doing pattern matching.
        T)rTrWrYr�r�)r�ZstartActionZ
successActionZexceptionActionryryrz�setDebugActions6s�zParserElement.setDebugActionscCs|r|�ttt�nd|_|S)a�
        Enable display of debugging messages while doing pattern matching.
        Set C{flag} to True to enable, False to disable.

        Example::
            wd = Word(alphas).setName("alphaword")
            integer = Word(nums).setName("numword")
            term = wd | integer
            
            # turn on debugging for wd
            wd.setDebug()

            OneOrMore(term).parseString("abc 123 xyz 890")
        
        prints::
            Match alphaword at loc 0(1,1)
            Matched alphaword -> ['abc']
            Match alphaword at loc 3(1,4)
            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
            Match alphaword at loc 7(1,8)
            Matched alphaword -> ['xyz']
            Match alphaword at loc 11(1,12)
            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
            Match alphaword at loc 15(1,16)
            Exception raised:Expected alphaword (at char 15), (line:1, col:16)

        The output shown is that produced by the default debug actions - custom debug actions can be
        specified using L{setDebugActions}. Prior to attempting
        to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
        is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
        message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
        which makes debugging and exception messages easier to understand - for instance, the default
        name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
        F)rrTrWrYr�)r��flagryryrz�setDebug@s#zParserElement.setDebugcCs|jSr�)r�r�ryryrzr�iszParserElement.__str__cCst|�Sr�r�r�ryryrzr�lszParserElement.__repr__cCsd|_d|_|Sr�)r�r{r�ryryrzr�oszParserElement.streamlinecCsdSr�ryr�ryryrz�checkRecursiontszParserElement.checkRecursioncCs|�g�dS)zj
        Check defined expressions for valid structure, check for infinite recursive definitions.
        N)r)r��
validateTraceryryrz�validatewszParserElement.validatecCs�z|��}Wn2tk
r>t|d��}|��}W5QRXYnXz|�||�WStk
r~}ztjrj�n|�W5d}~XYnXdS)z�
        Execute the parse expression on the given file or filename.
        If a filename is specified (instead of a file object),
        the entire file is opened, read, and closed before parsing.
        �rN)�readr��openr�rr&r�)r�Zfile_or_filenamer�Z
file_contents�frXryryrz�	parseFile}szParserElement.parseFilecsHt|t�r"||kp t|�t|�kSt|t�r6|�|�Stt|�|kSdSr�)r}r&�varsr�r��superr"�rmryrz�__eq__�s



zParserElement.__eq__cCs
||kSr�ryr"ryryrz�__ne__�szParserElement.__ne__cCstt|��Sr�)�hash�idr�ryryrz�__hash__�szParserElement.__hash__cCs||kSr�ryr"ryryrz�__req__�szParserElement.__req__cCs
||kSr�ryr"ryryrz�__rne__�szParserElement.__rne__cCs4z|jt|�|d�WdStk
r.YdSXdS)a�
        Method for quick testing of a parser against a test string. Good for simple 
         inline microtests of sub expressions while building up a larger parser.
           
        Parameters:
         - testString - to test against this expression for a match
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
            
        Example::
            expr = Word(nums)
            assert expr.matches("100")
        �r�TFN)r�r�r)r�Z
testStringr�ryryrzr��s

zParserElement.matches�#cCs�t|t�r"tttj|������}t|t�r4t|�}g}g}d}	|D�]�}
|dk	r^|�	|
d�sf|rr|
sr|�
|
�qD|
sxqDd�|�|
g}g}z:|
�dd�}
|j
|
|d�}|�
|j|d��|	o�|}	Wn�tk
�rr}
z�t|
t�r�dnd	}d|
k�r*|�
t|
j|
��|�
d
t|
j|
�dd|�n|�
d
|
jd|�|�
d
t|
��|	�o\|}	|
}W5d}
~
XYnDtk
�r�}z$|�
dt|��|	�o�|}	|}W5d}~XYnX|�r�|�r�|�
d	�td�|��|�
|
|f�qD|	|fS)a3
        Execute the parse expression on a series of test strings, showing each
        test, the parsed results or where the parse failed. Quick and easy way to
        run a parse expression against a list of sample strings.
           
        Parameters:
         - tests - a list of separate test strings, or a multiline string of test strings
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests           
         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test 
              string; pass None to disable comment filtering
         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
              if False, only dump nested list
         - printResults - (default=C{True}) prints test output to stdout
         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing

        Returns: a (success, results) tuple, where success indicates that all tests succeeded
        (or failed if C{failureTests} is True), and the results contain a list of lines of each 
        test's output
        
        Example::
            number_expr = pyparsing_common.number.copy()

            result = number_expr.runTests('''
                # unsigned integer
                100
                # negative integer
                -100
                # float with scientific notation
                6.02e23
                # integer with scientific notation
                1e-12
                ''')
            print("Success" if result[0] else "Failed!")

            result = number_expr.runTests('''
                # stray character
                100Z
                # missing leading digit before '.'
                -.100
                # too many '.'
                3.14.159
                ''', failureTests=True)
            print("Success" if result[0] else "Failed!")
        prints::
            # unsigned integer
            100
            [100]

            # negative integer
            -100
            [-100]

            # float with scientific notation
            6.02e23
            [6.02e+23]

            # integer with scientific notation
            1e-12
            [1e-12]

            Success
            
            # stray character
            100Z
               ^
            FAIL: Expected end of text (at char 3), (line:1, col:4)

            # missing leading digit before '.'
            -.100
            ^
            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)

            # too many '.'
            3.14.159
                ^
            FAIL: Expected end of text (at char 4), (line:1, col:5)

            Success

        Each test string must be on a single line. If you want to test a string that spans multiple
        lines, create a test like this::

            expr.runTests(r"this is a test\n of strings that spans \n 3 lines")
        
        (Note that this is a raw string literal, you must include the leading 'r'.)
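# Illustrative, standalone sketch (assumes ``pyparsing`` is importable): using
# the returned (success, results) tuple programmatically instead of relying on
# the printed report.
from pyparsing import Word, nums

integer = Word(nums)
ok, report = integer.runTests("""
    100
    42
    """, printResults=False)
print(ok)                           # True - every test line parsed successfully
for test_line, outcome in report:   # outcome is a ParseResults (or the exception, on failure)
    print(test_line, "->", outcome)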
        TNFr2�\nr%)rDz(FATAL)r�� r��^zFAIL: zFAIL-EXCEPTION: )r}r�r�r�r~r��rstrip�
splitlinesrr�rr�r�r�rArr#rIr�r;rprQ)r�Ztestsr�ZcommentZfullDumpZprintResultsZfailureTestsZ
allResultsZcomments�successrxr*�resultr�r�rXryryrz�runTests�sNW




$


zParserElement.runTests)F)F)T)T)TT)TT)r�)F)N)T)F)T)Tr&TTF)Or�r�r�r�rsr��staticmethodrvrxr�r�r�r�r�r�r�r�r�r�r�r�r�r�r�r�r�r�r�r�r�rr�r�r�r�r�r�r�r��_MAX_INTr�r�r�r�rr#r�r�rrrrrrrrrr	r
rr
rrrrr�r�r�rrrrrr"r#r$r�r.�
__classcell__ryryrrzr&Os�




&




G

"
2G+D
			

)

cs eZdZdZ�fdd�Z�ZS)r.zT
    Abstract C{ParserElement} subclass, for defining atomic matching patterns.
    cstt|�jdd�dS�NF�r�)rr.r�r�rryrzr�@	szToken.__init__�r�r�r�r�r�r1ryryrrzr.<	scs eZdZdZ�fdd�Z�ZS)rz,
    An empty token, will always match.
    cs$tt|���d|_d|_d|_dS)NrTF)rrr�r�r�r�r�rryrzr�H	szEmpty.__init__r4ryryrrzrD	scs*eZdZdZ�fdd�Zddd�Z�ZS)rz(
    A token that will never match.
    cs*tt|���d|_d|_d|_d|_dS)NrTFzUnmatchable token)rrr�r�r�r�r�r�rryrzr�S	s
zNoMatch.__init__TcCst|||j|��dSr�)r!r�r�ryryrzr�Z	szNoMatch.parseImpl)T�r�r�r�r�r�r�r1ryryrrzrO	scs*eZdZdZ�fdd�Zddd�Z�ZS)ra�
    Token to exactly match a specified string.
    
    Example::
        Literal('blah').parseString('blah')  # -> ['blah']
        Literal('blah').parseString('blahfooblah')  # -> ['blah']
        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
    
    For case-insensitive matching, use L{CaselessLiteral}.
    
    For keyword matching (force word break before and after the matched string),
    use L{Keyword} or L{CaselessKeyword}.
    cs�tt|���||_t|�|_z|d|_Wn*tk
rVtj	dt
dd�t|_YnXdt
|j�|_d|j|_d|_d|_dS)Nrz2null string passed to Literal; use Empty() insteadrsr��"%s"r�F)rrr��matchr��matchLen�firstMatchCharr�r�r�r�rrmr�r�r�r�r��r��matchStringrryrzr�l	s
�zLiteral.__init__TcCsJ|||jkr6|jdks&|�|j|�r6||j|jfSt|||j|��dSr�)r9r8�
startswithr7r!r�r�ryryrzr�	s��zLiteral.parseImpl)Tr5ryryrrzr^	s
csLeZdZdZedZd�fdd�	Zddd	�Z�fd
d�Ze	dd
��Z
�ZS)ra\
    Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
    Accepts two optional constructor arguments in addition to the keyword string:
     - C{identChars} is a string of characters that would be valid identifier characters,
          defaulting to all alphanumerics + "_" and "$"
     - C{caseless} allows case-insensitive matching, default is C{False}.
       
    Example::
        Keyword("start").parseString("start")  # -> ['start']
        Keyword("start").parseString("starting")  # -> Exception

    For case-insensitive matching, use L{CaselessKeyword}.
    �_$NFcs�tt|���|dkrtj}||_t|�|_z|d|_Wn$tk
r^t	j
dtdd�YnXd|j|_d|j|_
d|_d|_||_|r�|��|_|��}t|�|_dS)Nrz2null string passed to Keyword; use Empty() insteadrsr�r6r�F)rrr��DEFAULT_KEYWORD_CHARSr7r�r8r9r�r�r�r�r�r�r�r��caseless�upper�
caselessmatchr��
identChars)r�r;rBr?rryrzr��	s*
�
zKeyword.__init__TcCs|jr|||||j���|jkr�|t|�|jksL|||j��|jkr�|dksj||d��|jkr�||j|jfSnv|||jkr�|jdks�|�|j|�r�|t|�|jks�|||j|jkr�|dks�||d|jkr�||j|jfSt	|||j
|��dSr�)r?r8r@rAr�rBr7r9r<r!r�r�ryryrzr��	s4����������zKeyword.parseImplcstt|���}tj|_|Sr�)rrr�r>rB)r�r�rryrzr��	szKeyword.copycCs
|t_dS)z,Overrides the default Keyword chars
        N)rr>rtryryrz�setDefaultKeywordChars�	szKeyword.setDefaultKeywordChars)NF)T)r�r�r�r�r5r>r�r�r�r/rCr1ryryrrzr�	s
cs*eZdZdZ�fdd�Zddd�Z�ZS)r
al
    Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.

    Example::
        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
        
    (Contrast with example for L{CaselessKeyword}.)
    cs6tt|��|���||_d|j|_d|j|_dS)Nz'%s'r�)rr
r�r@�returnStringr�r�r:rryrzr��	szCaselessLiteral.__init__TcCs@||||j���|jkr,||j|jfSt|||j|��dSr�)r8r@r7rDr!r�r�ryryrzr��	szCaselessLiteral.parseImpl)Tr5ryryrrzr
�	s
cs,eZdZdZd�fdd�	Zd	dd�Z�ZS)
r	z�
    Caseless version of L{Keyword}.

    Example::
        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
        
    (Contrast with example for L{CaselessLiteral}.)
    Ncstt|�j||dd�dS)NT�r?)rr	r�)r�r;rBrryrzr��	szCaselessKeyword.__init__TcCsj||||j���|jkrV|t|�|jksF|||j��|jkrV||j|jfSt|||j|��dSr�)r8r@rAr�rBr7r!r�r�ryryrzr��	s��zCaselessKeyword.parseImpl)N)Tr5ryryrrzr	�	scs,eZdZdZd�fdd�	Zd	dd�Z�ZS)
rnax
    A variation on L{Literal} which matches "close" matches, that is, 
    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
     - C{match_string} - string to be matched
     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
    
    The results from a successful parse will contain the matched text from the input string and the following named results:
     - C{mismatches} - a list of the positions within the match_string where mismatches were found
     - C{original} - the original match_string used to compare against the input string
    
    If C{mismatches} is an empty list, then the match was an exact match.
    
    Example::
        patt = CloseMatch("ATCATCGAATGGA")
        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)

        # exact match
        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})

        # close match allowing up to 2 mismatches
        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
    r�csBtt|���||_||_||_d|j|jf|_d|_d|_dS)Nz&Expected %r (with up to %d mismatches)F)	rrnr�r��match_string�
maxMismatchesr�r�r�)r�rFrGrryrzr�

szCloseMatch.__init__TcCs�|}t|�}|t|j�}||kr�|j}d}g}	|j}
tt|||�|j��D]2\}}|\}}
||
krN|	�|�t|	�|
krNq�qN|d}t|||�g�}|j|d<|	|d<||fSt|||j|��dS)Nrr��original�
mismatches)	r�rFrGr�r�rr$r!r�)r�rRr�r��startr��maxlocrFZmatch_stringlocrIrGZs_m�src�mat�resultsryryrzr�
s( 

zCloseMatch.parseImpl)r�)Tr5ryryrrzrn�	s	cs8eZdZdZd
�fdd�	Zdd	d
�Z�fdd�Z�ZS)r1a	
    Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction. An optional
    C{excludeChars} parameter can list characters that might be found in 
    the input C{bodyChars} string; useful to define a word of all printables
    except for one or two characters, for instance.
    
    L{srange} is useful for defining custom character set strings for defining 
    C{Word} expressions, using range notation from regular expression character sets.
    
    A common mistake is to use C{Word} to match a specific literal string, as in 
    C{Word("Address")}. Remember that C{Word} uses the string argument to define
    I{sets} of matchable characters. This expression would match "Add", "AAA",
    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
    To match an exact literal string, use L{Literal} or L{Keyword}.

    pyparsing includes helper strings for building Words:
     - L{alphas}
     - L{nums}
     - L{alphanums}
     - L{hexnums}
     - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
     - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
     - L{printables} (any non-whitespace character)

    Example::
        # a word composed of digits
        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
        
        # a word with a leading capital, and zero or more lowercase
        capital_word = Word(alphas.upper(), alphas.lower())

        # hostnames are alphanumeric, with leading alpha, and '-'
        hostname = Word(alphas, alphanums+'-')
        
        # roman numeral (not a strict parser, accepts invalid mix of characters)
        roman = Word("IVXLCDM")
        
        # any string of non-whitespace characters, except for ','
        csv_value = Word(printables, excludeChars=",")
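# Illustrative, standalone sketch (assumes ``pyparsing`` is importable) of the
# two-character-set form and the excludeChars option described above.
from pyparsing import Word, alphanums, alphas, nums, printables

print(Word(nums).parseString("123 abc"))                          # ['123']
print(Word(alphas, alphanums + '-').parseString("my-host01"))     # ['my-host01']
print(Word(printables, excludeChars=",").parseString("a-1,b-2"))  # ['a-1']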
    Nr�rFcs�tt|����rFd��fdd�|D��}|rFd��fdd�|D��}||_t|�|_|rl||_t|�|_n||_t|�|_|dk|_	|dkr�t
d��||_|dkr�||_nt
|_|dkr�||_||_t|�|_d|j|_d	|_||_d
|j|jk�r�|dk�r�|dk�r�|dk�r�|j|jk�r8dt|j�|_nHt|j�dk�rfdt�|j�t|j�f|_nd
t|j�t|j�f|_|j�r�d|jd|_zt�|j�|_Wntk
�r�d|_YnXdS)Nr�c3s|]}|�kr|VqdSr�ryr���excludeCharsryrzr�`
sz Word.__init__.<locals>.<genexpr>c3s|]}|�kr|VqdSr�ryr�rOryrzr�b
srr�zZcannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permittedr�Fr(z[%s]+z%s[%s]*z	[%s][%s]*z\b)rr1r�r��
initCharsOrigr��	initChars�
bodyCharsOrig�	bodyChars�maxSpecifiedr��minLen�maxLenr0r�r�r�r��	asKeyword�_escapeRegexRangeChars�reStringr�r��escape�compilerp)r�rRrT�min�max�exactrXrPrrOrzr�]
s\



0
����z
Word.__init__Tc
Cs>|jr<|j�||�}|s(t|||j|��|��}||��fS|||jkrZt|||j|��|}|d7}t|�}|j}||j	}t
||�}||kr�|||kr�|d7}q�d}	|||jkr�d}	|jr�||kr�|||kr�d}	|j
�r|dkr�||d|k�s||k�r|||k�rd}	|	�r.t|||j|��||||�fS)Nr�FTr)r�r7r!r��end�grouprRr�rTrWr]rVrUrX)
r�rRr�r�r-rJr�Z	bodycharsrKZthrowExceptionryryrzr��
s6


2zWord.parseImplcsvztt|���WStk
r$YnX|jdkrpdd�}|j|jkr`d||j�||j�f|_nd||j�|_|jS)NcSs$t|�dkr|dd�dS|SdS)N��...r��r�ryryrz�
charsAsStr�
sz Word.__str__.<locals>.charsAsStrz	W:(%s,%s)zW:(%s))rr1r�rpr{rQrS)r�rerryrzr��
s
zWord.__str__)Nr�rrFN)T�r�r�r�r�r�r�r�r1ryryrrzr1.
s.6
#csFeZdZdZee�d��Zd�fdd�	Zddd�Z	�fd	d
�Z
�ZS)
r)a�
    Token for matching strings that match a given regular expression.
    Defined with a string specifying the regular expression in a form recognized by the built-in Python re module.
    If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as 
    named parse results.

    Example::
        realnum = Regex(r"[+-]?\d+\.\d*")
        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
    z[A-Z]rcs�tt|���t|t�r�|s,tjdtdd�||_||_	zt
�|j|j	�|_
|j|_Wq�t
jk
r�tjd|tdd��Yq�Xn2t|tj�r�||_
t|�|_|_||_	ntd��t|�|_d|j|_d|_d|_d	S)
z�The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.z0null string passed to Regex; use Empty() insteadrsr��$invalid pattern (%s) passed to RegexzCRegex may only be constructed with a string or a compiled RE objectr�FTN)rr)r�r}r�r�r�r��pattern�flagsr�r\rZ�
sre_constants�error�compiledREtyper~r�r�r�r�r�r�)r�rhrirryrzr��
s:
�
�
�
zRegex.__init__TcCs`|j�||�}|s"t|||j|��|��}|��}t|���}|rX|D]}||||<qF||fSr�)r�r7r!r�r`�	groupdictr$ra)r�rRr�r�r-�dr�r�ryryrzr��
szRegex.parseImplcsFztt|���WStk
r$YnX|jdkr@dt|j�|_|jS)NzRe:(%s))rr)r�rpr{r�rhr�rryrzr�
s
z
Regex.__str__)r)T)r�r�r�r�r�r�r\rlr�r�r�r1ryryrrzr)�
s
"

cs8eZdZdZd�fdd�	Zddd�Z�fd	d
�Z�ZS)
r'a�
    Token for matching strings that are delimited by quoting characters.
    
    Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=C{None})
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})

    Example::
        qs = QuotedString('"')
        print(qs.searchString('lsjdf "This is the quote" sldjf'))
        complex_qs = QuotedString('{{', endQuoteChar='}}')
        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
        sql_qs = QuotedString('"', escQuote='""')
        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
    prints::
        [['This is the quote']]
        [['This is the "quote"']]
        [['This is the quote with "embedded" quotes']]
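    Two further illustrative sketches of the C{escChar} and C{unquoteResults} options listed above::
        esc_qs = QuotedString('"', escChar='\\')
        print(esc_qs.parseString(r'"a \"quoted\" word"'))   # -> ['a "quoted" word']

        raw_qs = QuotedString('"', unquoteResults=False)
        print(raw_qs.parseString('"keep the quotes"'))      # -> ['"keep the quotes"']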
    NFTc
sNtt����|��}|s0tjdtdd�t��|dkr>|}n"|��}|s`tjdtdd�t��|�_t	|��_
|d�_|�_t	|��_
|�_|�_|�_|�_|r�tjtjB�_dt��j�t�jd�|dk	r�t|�p�df�_n<d�_dt��j�t�jd�|dk	�rt|��pdf�_t	�j�d	k�rp�jd
d��fdd
�tt	�j�d	dd�D��d7_|�r��jdt�|�7_|�r��jdt�|�7_t��j�d�_�jdt��j�7_zt��j�j��_�j�_Wn0tjk
�r&tjd�jtdd��YnXt ���_!d�j!�_"d�_#d�_$dS)Nz$quoteChar cannot be the empty stringrsr�z'endQuoteChar cannot be the empty stringrz%s(?:[^%s%s]r�z%s(?:[^%s\n\r%s]r�z|(?:z)|(?:c3s4|],}dt��jd|��t�j|�fVqdS)z%s[^%s]N)r�r[�endQuoteCharrYr&r�ryrzr�Xs��z(QuotedString.__init__.<locals>.<genexpr>rt�)z|(?:%s)z|(?:%s.)z(.)z)*%srgr�FT)%rr'r�r�r�r�r��SyntaxError�	quoteCharr��quoteCharLen�firstQuoteCharro�endQuoteCharLen�escChar�escQuote�unquoteResults�convertWhitespaceEscapesr��	MULTILINE�DOTALLrir[rYrhr�r��escCharReplacePatternr\rZrjrkr�r�r�r�r�)r�rrrvrwZ	multilinerxroryrr�rzr�/s|



��
������
zQuotedString.__init__c	Cs�|||jkr|j�||�pd}|s4t|||j|��|��}|��}|jr�||j|j	�}t
|t�r�d|kr�|jr�ddddd�}|�
�D]\}}|�||�}q�|jr�t�|jd|�}|jr�|�|j|j�}||fS)N�\�	r2��
)�\tr'z\fz\rz\g<1>)rtr�r7r!r�r`rarxrsrur}r�ryr�r�rvr�r|rwro)	r�rRr�r�r-r�Zws_mapZwslitZwscharryryrzr�ps* 
�zQuotedString.parseImplcsHztt|���WStk
r$YnX|jdkrBd|j|jf|_|jS)Nz.quoted string, starting with %s ending with %s)rr'r�rpr{rrror�rryrzr��s
zQuotedString.__str__)NNFTNT)Trfryryrrzr'sA
#cs8eZdZdZd�fdd�	Zddd�Z�fd	d
�Z�ZS)
ra�
    Token for matching words composed of characters I{not} in a given set (will
    include whitespace in matched characters if not listed in the provided exclusion set - see example).
    Defined with a string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.

    Example::
        # define a comma-separated-value as anything that is not a ','
        csv_value = CharsNotIn(',')
        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
    prints::
        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
    r�rcs�tt|���d|_||_|dkr*td��||_|dkr@||_nt|_|dkrZ||_||_t	|�|_
d|j
|_|jdk|_d|_
dS)NFr�zfcannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permittedrr�)rrr�r~�notCharsr�rVrWr0r�r�r�r�r�)r�r�r]r^r_rryrzr��s 
zCharsNotIn.__init__TcCs�|||jkrt|||j|��|}|d7}|j}t||jt|��}||krb|||krb|d7}qD|||jkr�t|||j|��||||�fSr�)r�r!r�r]rWr�rV)r�rRr�r�rJZnotchars�maxlenryryrzr��s
�
zCharsNotIn.parseImplcsfztt|���WStk
r$YnX|jdkr`t|j�dkrTd|jdd�|_nd|j|_|jS)Nrbz
!W:(%s...)z!W:(%s))rrr�rpr{r�r�r�rryrzr��s
zCharsNotIn.__str__)r�rr)Trfryryrrzr�s
cs<eZdZdZdddddd�Zd�fdd�	Zddd�Z�ZS)r0a�
    Special matching class for matching whitespace.  Normally, whitespace is ignored
    by pyparsing grammars.  This class is included when some whitespace structures
    are significant.  Define with a string containing the whitespace characters to be
    matched; default is C{" \t\r\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{L{Word}} class.
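    An illustrative sketch of treating leading tab characters as significant instead of skipping them::
        tab = White('\t')
        line_item = tab + Word(alphas)
        print(line_item.parseString('\tindented'))   # -> ['\t', 'indented']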
    z<SPC>z<TAB>z<LF>z<CR>z<FF>)r(r~r2r�r� 	
r�rcs�tt����|�_��d��fdd��jD���d�dd��jD���_d�_d�j�_	|�_
|dkrt|�_nt�_|dkr�|�_|�_
dS)Nr�c3s|]}|�jkr|VqdSr�)�
matchWhiter�r�ryrzr��s
z!White.__init__.<locals>.<genexpr>css|]}tj|VqdSr�)r0�	whiteStrsr�ryryrzr��sTr�r)
rr0r�r�r
r�rr�r�r�rVrWr0)r�Zwsr]r^r_rr�rzr��s zWhite.__init__TcCs�|||jkrt|||j|��|}|d7}||j}t|t|��}||krb|||jkrb|d7}qB|||jkr�t|||j|��||||�fSr�)r�r!r�rWr]r�rV)r�rRr�r�rJrKryryrzr�	s

zWhite.parseImpl)r�r�rr)T)r�r�r�r�r�r�r�r1ryryrrzr0�s�cseZdZ�fdd�Z�ZS)�_PositionTokencs(tt|���|jj|_d|_d|_dSr�)rr�r�rmr�r�r�r�r�rryrzr�s
z_PositionToken.__init__�r�r�r�r�r1ryryrrzr�sr�cs2eZdZdZ�fdd�Zdd�Zd	dd�Z�ZS)
rzb
    Token to advance to a specific column of input text; useful for tabular report scraping.
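    An illustrative sketch, assuming a fixed-width report whose status field starts in column 10::
        row = Word(nums)("issue") + GoToColumn(10) + Word(alphas)("status")
        rec = row.parseString("101      Critical")
        print(rec.issue, rec.status)   # -> 101 Critical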
    cstt|���||_dSr�)rrr�r;)r��colnorryrzr�$szGoToColumn.__init__cCs\t||�|jkrXt|�}|jr*|�||�}||krX||��rXt||�|jkrX|d7}q*|Sr�)r;r�r�r��isspace)r�rRr�r�ryryrzr�(s$
zGoToColumn.preParseTcCsDt||�}||jkr"t||d|��||j|}|||�}||fS)NzText not in expected column�r;r!)r�rRr�r�ZthiscolZnewlocr�ryryrzr�1s

zGoToColumn.parseImpl)T)r�r�r�r�r�r�r�r1ryryrrzr s	cs*eZdZdZ�fdd�Zddd�Z�ZS)ra�
    Matches if current position is at the beginning of a line within the parse string
    
    Example::
    
        test = '''        AAA this line
        AAA and this line
          AAA but not this one
        B AAA and definitely not this one
        '''

        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
            print(t)
    
    Prints::
        ['AAA', ' this line']
        ['AAA', ' and this line']    

    cstt|���d|_dS)NzExpected start of line)rrr�r�r�rryrzr�OszLineStart.__init__TcCs*t||�dkr|gfSt|||j|��dSr�)r;r!r�r�ryryrzr�SszLineStart.parseImpl)Tr5ryryrrzr:scs*eZdZdZ�fdd�Zddd�Z�ZS)rzU
    Matches if current position is at the end of a line within the parse string
    cs,tt|���|�tj�dd��d|_dS)Nr2r�zExpected end of line)rrr�r
r&rsr�r�r�rryrzr�\szLineEnd.__init__TcCsb|t|�kr6||dkr$|ddfSt|||j|��n(|t|�krN|dgfSt|||j|��dS)Nr2r��r�r!r�r�ryryrzr�aszLineEnd.parseImpl)Tr5ryryrrzrXscs*eZdZdZ�fdd�Zddd�Z�ZS)r,zM
    Matches if current position is at the beginning of the parse string
    cstt|���d|_dS)NzExpected start of text)rr,r�r�r�rryrzr�pszStringStart.__init__TcCs0|dkr(||�|d�kr(t|||j|��|gfSr�)r�r!r�r�ryryrzr�tszStringStart.parseImpl)Tr5ryryrrzr,lscs*eZdZdZ�fdd�Zddd�Z�ZS)r+zG
    Matches if current position is at the end of the parse string
    cstt|���d|_dS)NzExpected end of text)rr+r�r�r�rryrzr�szStringEnd.__init__TcCs^|t|�krt|||j|��n<|t|�kr6|dgfS|t|�krJ|gfSt|||j|��dSr�r�r�ryryrzr��szStringEnd.parseImpl)Tr5ryryrrzr+{scs.eZdZdZef�fdd�	Zddd�Z�ZS)r3ap
    Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
    the string being parsed, or at the beginning of a line.
    cs"tt|���t|�|_d|_dS)NzNot at the start of a word)rr3r�r��	wordCharsr��r�r�rryrzr��s
zWordStart.__init__TcCs@|dkr8||d|jks(|||jkr8t|||j|��|gfSr�)r�r!r�r�ryryrzr��s�zWordStart.parseImpl)T�r�r�r�r�rXr�r�r1ryryrrzr3�scs.eZdZdZef�fdd�	Zddd�Z�ZS)r2aZ
    Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{} behavior of regular expressions,
    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
    the string being parsed, or at the end of a line.
    cs(tt|���t|�|_d|_d|_dS)NFzNot at the end of a word)rr2r�r�r�r~r�r�rryrzr��s
zWordEnd.__init__TcCsPt|�}|dkrH||krH|||jks8||d|jkrHt|||j|��|gfSr�)r�r�r!r�)r�rRr�r�r�ryryrzr��s�zWordEnd.parseImpl)Tr�ryryrrzr2�scs�eZdZdZd�fdd�	Zdd�Zdd�Zd	d
�Z�fdd�Z�fd
d�Z	�fdd�Z
d�fdd�	Zgfdd�Z�fdd�Z
�ZS)r"z^
    Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
    Fcs�tt|��|�t|t�r"t|�}t|t�r<t�|�g|_	nht|t
�rxt|�}tdd�|D��rlttj|�}t|�|_	n,zt|�|_	Wnt
k
r�|g|_	YnXd|_dS)Ncss|]}t|t�VqdSr�)r}r�)r�rSryryrzr��sz+ParseExpression.__init__.<locals>.<genexpr>F)rr"r�r}r�r�r�r&rw�exprsr�allr�r�r��r�r�r�rryrzr��s


zParseExpression.__init__cCs
|j|Sr�)r�r�ryryrzr��szParseExpression.__getitem__cCs|j�|�d|_|Sr�)r�rr{r"ryryrzr�szParseExpression.appendcCs0d|_dd�|jD�|_|jD]}|��q|S)z~Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
           all contained expressions.FcSsg|]}|���qSryr�r�r�ryryrzr��sz3ParseExpression.leaveWhitespace.<locals>.<listcomp>)r~r�r)r�r�ryryrzr�s


zParseExpression.leaveWhitespacecsrt|t�rB||jkrntt|��|�|jD]}|�|jd�q*n,tt|��|�|jD]}|�|jd�qX|Sr�)r}r-r�rr"rr�)r�rr�rryrzr�s



zParseExpression.ignorecsNztt|���WStk
r$YnX|jdkrHd|jjt|j�f|_|jS�Nz%s:(%s))	rr"r�rpr{rmr�r�r�r�rryrzr��s
zParseExpression.__str__cs*tt|���|jD]}|��qt|j�dk�r|jd}t||j�r�|js�|jdkr�|j	s�|jdd�|jdg|_d|_
|j|jO_|j|jO_|jd}t||j��r|j�s|jdk�r|j	�s|jdd�|jdd�|_d|_
|j|jO_|j|jO_dt
|�|_|S)Nrsrr�rtr�)rr"r�r�r�r}rmryr|r�r{r�r�r�r�)r�r�rrryrzr��s<


���
���zParseExpression.streamlinecstt|��||�}|Sr�)rr"r�)r�r�r�r�rryrzr�
szParseExpression.setResultsNamecCs6|dd�|g}|jD]}|�|�q|�g�dSr�)r�rr)r�r�tmpr�ryryrzr
s
zParseExpression.validatecs$tt|���}dd�|jD�|_|S)NcSsg|]}|���qSryrr�ryryrzr�%
sz(ParseExpression.copy.<locals>.<listcomp>)rr"r�r�r1rryrzr�#
szParseExpression.copy)F)F)r�r�r�r�r�r�rrrr�r�r�rr�r1ryryrrzr"�s	
"csTeZdZdZGdd�de�Zd�fdd�	Zddd�Zd	d
�Zdd�Z	d
d�Z
�ZS)ra

    Requires all given C{ParseExpression}s to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'+'} operator.
    May also be constructed using the C{'-'} operator, which will suppress backtracking.

    Example::
        integer = Word(nums)
        name_expr = OneOrMore(Word(alphas))

        expr = And([integer("id"),name_expr("name"),integer("age")])
        # more easily written as:
        expr = integer("id") + name_expr("name") + integer("age")
    cseZdZ�fdd�Z�ZS)zAnd._ErrorStopcs&ttj|�j||�d|_|��dS)N�-)rrr�r�r�rrGrryrzr�9
szAnd._ErrorStop.__init__r�ryryrrzr�8
sr�TcsRtt|��||�tdd�|jD��|_|�|jdj�|jdj|_d|_	dS)Ncss|]}|jVqdSr��r�r�ryryrzr�@
szAnd.__init__.<locals>.<genexpr>rT)
rrr�r�r�r�r
rr~r�r�rryrzr�>
s
zAnd.__init__c	Cs�|jdj|||dd�\}}d}|jdd�D]�}t|tj�rDd}q.|r�z|�|||�\}}Wq�tk
rt�Yq�tk
r�}zd|_t�|��W5d}~XYq�t	k
r�t|t
|�|j|��Yq�Xn|�|||�\}}|s�|��r.||7}q.||fS)NrFr�r�T)
r�r�r}rr�r%r�
__traceback__r�r�r�r�r
)	r�rRr�r��
resultlistZ	errorStopr�Z
exprtokensr�ryryrzr�E
s(
z
And.parseImplcCst|t�rt�|�}|�|�Sr��r}r�r&rwrr"ryryrzr!^
s

zAnd.__iadd__cCs6|dd�|g}|jD]}|�|�|jsq2qdSr�)r�rr��r�r��subRecCheckListr�ryryrzrc
s


zAnd.checkRecursioncCs@t|d�r|jS|jdkr:dd�dd�|jD��d|_|jS)Nr��{r(css|]}t|�VqdSr�r�r�ryryrzr�o
szAnd.__str__.<locals>.<genexpr>�}�r�r�r{r�r�r�ryryrzr�j
s


 zAnd.__str__)T)T)r�r�r�r�rr�r�r�r!rr�r1ryryrrzr(
s
csDeZdZdZd�fdd�	Zddd�Zdd	�Zd
d�Zdd
�Z�Z	S)ra�
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the C{'^'} operator.

    Example::
        # construct Or using '^' operator
        
        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789"))
    prints::
        [['123'], ['3.1416'], ['789']]
    Fcs:tt|��||�|jr0tdd�|jD��|_nd|_dS)Ncss|]}|jVqdSr�r�r�ryryrzr��
szOr.__init__.<locals>.<genexpr>T)rrr�r�rBr�r�rryrzr��
szOr.__init__TcCsRd}d}g}|jD]�}z|�||�}Wnvtk
rb}	zd|	_|	j|krR|	}|	j}W5d}	~	XYqtk
r�t|�|kr�t|t|�|j|�}t|�}YqX|�||f�q|�r(|j	dd�d�|D]^\}
}z|�
|||�WStk
�r$}	z d|	_|	j|k�r|	}|	j}W5d}	~	XYq�Xq�|dk	�r@|j|_|�nt||d|��dS)NrtcSs
|dSr�ry)�xryryrzr{�
r|zOr.parseImpl.<locals>.<lambda>)r� no defined alternatives to match)r�r�r!r�r�r�r�r�r�sortr�r�)r�rRr�r��	maxExcLoc�maxExceptionr�r�Zloc2r��_ryryrzr��
s<


zOr.parseImplcCst|t�rt�|�}|�|�Sr�r�r"ryryrz�__ixor__�
s

zOr.__ixor__cCs@t|d�r|jS|jdkr:dd�dd�|jD��d|_|jS)Nr�r�z ^ css|]}t|�VqdSr�r�r�ryryrzr��
szOr.__str__.<locals>.<genexpr>r�r�r�ryryrzr��
s


 z
Or.__str__cCs,|dd�|g}|jD]}|�|�qdSr��r�rr�ryryrzr�
s
zOr.checkRecursion)F)T)
r�r�r�r�r�r�r�r�rr1ryryrrzrt
s

&	csDeZdZdZd�fdd�	Zddd�Zdd	�Zd
d�Zdd
�Z�Z	S)ra�
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the C{'|'} operator.

    Example::
        # construct MatchFirst using '|' operator
        
        # watch the order of expressions to match
        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]

        # put more selective expression first
        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
    Fcs:tt|��||�|jr0tdd�|jD��|_nd|_dS)Ncss|]}|jVqdSr�r�r�ryryrzr��
sz&MatchFirst.__init__.<locals>.<genexpr>T)rrr�r�rBr�r�rryrzr��
szMatchFirst.__init__Tc	Cs�d}d}|jD]�}z|�|||�}|WStk
r`}z|j|krP|}|j}W5d}~XYqtk
r�t|�|kr�t|t|�|j|�}t|�}YqXq|dk	r�|j|_|�nt||d|��dS)Nrtr�)r�r�r!r�r�r�r�r�)	r�rRr�r�r�r�r�r�r�ryryrzr��
s$


zMatchFirst.parseImplcCst|t�rt�|�}|�|�Sr�r�r"ryryrz�__ior__�
s

zMatchFirst.__ior__cCs@t|d�r|jS|jdkr:dd�dd�|jD��d|_|jS)Nr�r�� | css|]}t|�VqdSr�r�r�ryryrzr��
sz%MatchFirst.__str__.<locals>.<genexpr>r�r�r�ryryrzr��
s


 zMatchFirst.__str__cCs,|dd�|g}|jD]}|�|�qdSr�r�r�ryryrzrs
zMatchFirst.checkRecursion)F)T)
r�r�r�r�r�r�r�r�rr1ryryrrzr�
s
	cs<eZdZdZd�fdd�	Zddd�Zdd�Zd	d
�Z�ZS)
ram
    Requires all given C{ParseExpression}s to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'&'} operator.

    Example::
        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
        integer = Word(nums)
        shape_attr = "shape:" + shape_type("shape")
        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
        color_attr = "color:" + color("color")
        size_attr = "size:" + integer("size")

        # use Each (using operator '&') to accept attributes in any order 
        # (shape and posn are required, color and size are optional)
        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)

        shape_spec.runTests('''
            shape: SQUARE color: BLACK posn: 100, 120
            shape: CIRCLE size: 50 color: BLUE posn: 50,80
            color:GREEN size:20 shape:TRIANGLE posn:20,40
            '''
            )
    prints::
        shape: SQUARE color: BLACK posn: 100, 120
        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
        - color: BLACK
        - posn: ['100', ',', '120']
          - x: 100
          - y: 120
        - shape: SQUARE


        shape: CIRCLE size: 50 color: BLUE posn: 50,80
        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
        - color: BLUE
        - posn: ['50', ',', '80']
          - x: 50
          - y: 80
        - shape: CIRCLE
        - size: 50


        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
        - color: GREEN
        - posn: ['20', ',', '40']
          - x: 20
          - y: 40
        - shape: TRIANGLE
        - size: 20
    Tcs8tt|��||�tdd�|jD��|_d|_d|_dS)Ncss|]}|jVqdSr�r�r�ryryrzr�?sz Each.__init__.<locals>.<genexpr>T)rrr�r�r�r�r~�initExprGroupsr�rryrzr�=sz
Each.__init__c	s�|jr�tdd�|jD��|_dd�|jD�}dd�|jD�}|||_dd�|jD�|_dd�|jD�|_dd�|jD�|_|j|j7_d	|_|}|jdd�}|jdd��g}d
}	|	�rj|�|j|j}
g}|
D]v}z|�||�}Wn t	k
�r|�
|�Yq�X|�
|j�t|�|��||k�r@|�
|�q�|�kr܈�
|�q�t|�t|
�kr�d	}	q�|�r�d�dd�|D��}
t	||d
|
��|�fdd�|jD�7}g}|D]"}|�|||�\}}|�
|��q�t|tg��}||fS)Ncss&|]}t|t�rt|j�|fVqdSr�)r}rr!rSr�ryryrzr�Es
z!Each.parseImpl.<locals>.<genexpr>cSsg|]}t|t�r|j�qSry�r}rrSr�ryryrzr�Fs
z"Each.parseImpl.<locals>.<listcomp>cSs g|]}|jrt|t�s|�qSry)r�r}rr�ryryrzr�Gs
cSsg|]}t|t�r|j�qSry)r}r4rSr�ryryrzr�Is
cSsg|]}t|t�r|j�qSry)r}rrSr�ryryrzr�Js
cSs g|]}t|tttf�s|�qSry)r}rr4rr�ryryrzr�KsFTr%css|]}t|�VqdSr�r�r�ryryrzr�fsz*Missing one or more required elements (%s)cs$g|]}t|t�r|j�kr|�qSryr�r��ZtmpOptryrzr�js

)r�r�r�Zopt1mapZ	optionalsZmultioptionalsZ
multirequiredZrequiredr�r!rr�r!�remover�r�r��sumr$)r�rRr�r�Zopt1Zopt2ZtmpLocZtmpReqdZ
matchOrderZkeepMatchingZtmpExprsZfailedr�Zmissingr�rNZfinalResultsryr�rzr�CsP

zEach.parseImplcCs@t|d�r|jS|jdkr:dd�dd�|jD��d|_|jS)Nr�r�z & css|]}t|�VqdSr�r�r�ryryrzr�yszEach.__str__.<locals>.<genexpr>r�r�r�ryryrzr�ts


 zEach.__str__cCs,|dd�|g}|jD]}|�|�qdSr�r�r�ryryrzr}s
zEach.checkRecursion)T)T)	r�r�r�r�r�r�r�rr1ryryrrzrs
5
1	csleZdZdZd�fdd�	Zddd�Zdd	�Z�fd
d�Z�fdd
�Zdd�Z	gfdd�Z
�fdd�Z�ZS)r za
    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
    Fcs�tt|��|�t|t�r@ttjt�r2t�|�}nt�t	|��}||_
d|_|dk	r�|j|_|j
|_
|�|j�|j|_|j|_|j|_|j�|j�dSr�)rr r�r}r��
issubclassr&rwr.rrSr{r�r�r
rr~r}r�r�r�r�rSr�rryrzr��s
zParseElementEnhance.__init__TcCs2|jdk	r|jj|||dd�Std||j|��dS)NFr�r�)rSr�r!r�r�ryryrzr��s
zParseElementEnhance.parseImplcCs*d|_|j��|_|jdk	r&|j��|Sr�)r~rSr�rr�ryryrzr�s


z#ParseElementEnhance.leaveWhitespacecsrt|t�rB||jkrntt|��|�|jdk	rn|j�|jd�n,tt|��|�|jdk	rn|j�|jd�|Sr�)r}r-r�rr rrSr"rryrzr�s



zParseElementEnhance.ignorecs&tt|���|jdk	r"|j��|Sr�)rr r�rSr�rryrzr��s

zParseElementEnhance.streamlinecCsB||krt||g��|dd�|g}|jdk	r>|j�|�dSr�)r(rSr)r�r�r�ryryrzr�s

z"ParseElementEnhance.checkRecursioncCs6|dd�|g}|jdk	r(|j�|�|�g�dSr��rSrr�r�rr�ryryrzr�s
zParseElementEnhance.validatecsXztt|���WStk
r$YnX|jdkrR|jdk	rRd|jjt|j�f|_|jSr�)	rr r�rpr{rSrmr�r�r�rryrzr��szParseElementEnhance.__str__)F)T)
r�r�r�r�r�r�rrr�rrr�r1ryryrrzr �s
cs*eZdZdZ�fdd�Zddd�Z�ZS)ra�
    Lookahead matching of the given parse expression.  C{FollowedBy}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression matches at the current
    position.  C{FollowedBy} always returns a null token list.

    Example::
        # use FollowedBy to match a label only if it is followed by a ':'
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        
        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
    prints::
        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
    cstt|��|�d|_dSr�)rrr�r��r�rSrryrzr��szFollowedBy.__init__TcCs|j�||�|gfSr�)rSr�r�ryryrzr��szFollowedBy.parseImpl)Tr5ryryrrzr�scs2eZdZdZ�fdd�Zd	dd�Zdd�Z�ZS)
ra�
    Lookahead to disallow matching with the given parse expression.  C{NotAny}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression does I{not} match at the current
    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
    always returns a null token list.  May be constructed using the '~' operator.

    Example::
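        # a minimal sketch: use ~ (NotAny) to keep keywords from being read as identifiers
        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
        ident = ~(AND | OR | NOT) + Word(alphas)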
        
    cs0tt|��|�d|_d|_dt|j�|_dS)NFTzFound unwanted token, )rrr�r~r�r�rSr�r�rryrzr��szNotAny.__init__TcCs&|j�||�rt|||j|��|gfSr�)rSr�r!r�r�ryryrzr��szNotAny.parseImplcCs4t|d�r|jS|jdkr.dt|j�d|_|jS)Nr�z~{r��r�r�r{r�rSr�ryryrzr�s


zNotAny.__str__)Trfryryrrzr�s

cs(eZdZd�fdd�	Zddd�Z�ZS)	�_MultipleMatchNcsFtt|��|�d|_|}t|t�r.t�|�}|dk	r<|nd|_dSr�)	rr�r�r}r}r�r&rw�	not_ender)r�rS�stopOnZenderrryrzr�s

z_MultipleMatch.__init__Tc	Cs�|jj}|j}|jdk	}|r$|jj}|r2|||�||||dd�\}}zV|j}	|r`|||�|	rp|||�}
n|}
|||
|�\}}|s�|��rR||7}qRWnttfk
r�YnX||fS�NFr�)	rSr�r�r�r�r�r
r!r�)r�rRr�r�Zself_expr_parseZself_skip_ignorablesZcheck_enderZ
try_not_enderr�ZhasIgnoreExprsr�Z	tmptokensryryrzr�s*



z_MultipleMatch.parseImpl)N)T)r�r�r�r�r�r1ryryrrzr�
sr�c@seZdZdZdd�ZdS)ra�
    Repetition of one or more of the given expression.
    
    Parameters:
     - expr - expression that must match one or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition 
          expression)          

    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: BLACK"
        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]

        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
        
        # could also be written as
        (attr_expr * (1,)).parseString(text).pprint()
    cCs4t|d�r|jS|jdkr.dt|j�d|_|jS)Nr�r�z}...r�r�ryryrzr�Js


zOneOrMore.__str__N)r�r�r�r�r�ryryryrzr0scs8eZdZdZd
�fdd�	Zd�fdd�	Zdd	�Z�ZS)r4aw
    Optional repetition of zero or more of the given expression.
    
    Parameters:
     - expr - expression that must match zero or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition 
          expression)          

    Example: similar to L{OneOrMore}
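    A small illustrative sketch; the expression still succeeds when no repetitions are present::
        name = Word(alphas)
        flags = ZeroOrMore(Suppress(',') + Word(alphas))
        print((name + flags).parseString("copy,ro,hidden"))   # -> ['copy', 'ro', 'hidden']
        print((name + flags).parseString("copy"))             # -> ['copy']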
    Ncstt|�j||d�d|_dS)N)r�T)rr4r�r�)r�rSr�rryrzr�_szZeroOrMore.__init__Tc	s<ztt|��|||�WSttfk
r6|gfYSXdSr�)rr4r�r!r�r�rryrzr�cszZeroOrMore.parseImplcCs4t|d�r|jS|jdkr.dt|j�d|_|jS)Nr�r$�]...r�r�ryryrzr�is


zZeroOrMore.__str__)N)Trfryryrrzr4Ssc@s eZdZdd�ZeZdd�ZdS)�
_NullTokencCsdSr�ryr�ryryrzr�ssz_NullToken.__bool__cCsdSr�ryr�ryryrzr�vsz_NullToken.__str__N)r�r�r�r�rLr�ryryryrzr�rsr�cs6eZdZdZef�fdd�	Zd	dd�Zdd�Z�ZS)
raa
    Optional matching of the given expression.

    Parameters:
     - expr - expression that may be matched zero or one time
     - default (optional) - value to be returned if the optional expression is not found.

    Example::
        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
        zip.runTests('''
            # traditional ZIP code
            12345
            
            # ZIP+4 form
            12101-0001
            
            # invalid ZIP
            98765-
            ''')
    prints::
        # traditional ZIP code
        12345
        ['12345']

        # ZIP+4 form
        12101-0001
        ['12101-0001']

        # invalid ZIP
        98765-
             ^
        FAIL: Expected end of text (at char 5), (line:1, col:6)
    cs.tt|�j|dd�|jj|_||_d|_dS)NFr3T)rrr�rSr}rr�)r�rSrrryrzr��s
zOptional.__init__Tc	Cszz|jj|||dd�\}}WnTttfk
rp|jtk	rh|jjr^t|jg�}|j||jj<ql|jg}ng}YnX||fSr�)rSr�r!r�r�_optionalNotMatchedr|r$)r�rRr�r�r�ryryrzr��s


zOptional.parseImplcCs4t|d�r|jS|jdkr.dt|j�d|_|jS)Nr�r$r'r�r�ryryrzr��s


zOptional.__str__)T)	r�r�r�r�r�r�r�r�r1ryryrrzrzs"
cs,eZdZdZd	�fdd�	Zd
dd�Z�ZS)r*a�	
    Token for skipping over all undefined text until the matched expression is found.

    Parameters:
     - expr - target expression marking the end of the data to be skipped
     - include - (default=C{False}) if True, the target expression is also parsed 
          (the skipped text and target expression are returned as a 2-element list).
     - ignore - (default=C{None}) used to define grammars (typically quoted strings and 
          comments) that might contain false matches to the target expression
     - failOn - (default=C{None}) define expressions that are not allowed to be 
          included in the skipped text; if found before the target expression is found, 
          the SkipTo is not a match

    Example::
        report = '''
            Outstanding Issues Report - 1 Jan 2000

               # | Severity | Description                               |  Days Open
            -----+----------+-------------------------------------------+-----------
             101 | Critical | Intermittent system crash                 |          6
              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
              79 | Minor    | System slow when running too many reports |         47
            '''
        integer = Word(nums)
        SEP = Suppress('|')
        # use SkipTo to simply match everything up until the next SEP
        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
        # - parse action will call token.strip() for each matched token, i.e., the description body
        string_data = SkipTo(SEP, ignore=quotedString)
        string_data.setParseAction(tokenMap(str.strip))
        ticket_expr = (integer("issue_num") + SEP 
                      + string_data("sev") + SEP 
                      + string_data("desc") + SEP 
                      + integer("days_open"))
        
        for tkt in ticket_expr.searchString(report):
            print tkt.dump()
    prints::
        ['101', 'Critical', 'Intermittent system crash', '6']
        - days_open: 6
        - desc: Intermittent system crash
        - issue_num: 101
        - sev: Critical
        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
        - days_open: 14
        - desc: Spelling error on Login ('log|n')
        - issue_num: 94
        - sev: Cosmetic
        ['79', 'Minor', 'System slow when running too many reports', '47']
        - days_open: 47
        - desc: System slow when running too many reports
        - issue_num: 79
        - sev: Minor
    FNcs`tt|��|�||_d|_d|_||_d|_t|t	�rFt
�|�|_n||_dt
|j�|_dS)NTFzNo match found for )rr*r��
ignoreExprr�r��includeMatchr�r}r�r&rw�failOnr�rSr�)r�rZincluderr�rryrzr��s
zSkipTo.__init__Tc	Cs&|}t|�}|j}|jj}|jdk	r,|jjnd}|jdk	rB|jjnd}	|}
|
|kr�|dk	rf|||
�rfq�|	dk	r�z|	||
�}
Wqntk
r�Yq�YqnXqnz|||
ddd�Wq�tt	fk
r�|
d7}
YqJXq�qJt|||j
|��|
}|||�}t|�}|j�r||||dd�\}}
||
7}||fS)NF)r�r�r�r�)
r�rSr�r�r�r�r�rr!r�r�r$r�)r�rRr�r�rUr�rSZ
expr_parseZself_failOn_canParseNextZself_ignoreExpr_tryParseZtmplocZskiptextZ
skipresultrMryryrzr��s:
zSkipTo.parseImpl)FNN)Tr5ryryrrzr*�s6
csbeZdZdZd�fdd�	Zdd�Zdd�Zd	d
�Zdd�Zgfd
d�Z	dd�Z
�fdd�Z�ZS)raK
    Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.

    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
        fwdExpr << a | b | c
    will actually be evaluated as::
        (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    explicitly group the values inserted into the C{Forward}::
        fwdExpr << (a | b | c)
    Converting to use the '<<=' operator instead will avoid this problem.

    See L{ParseResults.pprint} for an example of a recursive parser created using
    C{Forward}.
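    An illustrative sketch of a recursive grammar for nested, parenthesized integer lists (the names used here are for the sketch only)::
        LPAR, RPAR = map(Suppress, "()")
        value = Forward()
        item = Word(nums) | Group(value)
        value <<= LPAR + Optional(delimitedList(item)) + RPAR
        print(value.parseString("(1, (2, 3), 4)"))   # -> ['1', ['2', '3'], '4']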
    Ncstt|�j|dd�dSr2)rrr�r"rryrzr�@szForward.__init__cCsjt|t�rt�|�}||_d|_|jj|_|jj|_|�|jj	�|jj
|_
|jj|_|j�
|jj�|Sr�)r}r�r&rwrSr{r�r�r
rr~r}r�rr"ryryrz�
__lshift__Cs





zForward.__lshift__cCs||>Sr�ryr"ryryrz�__ilshift__PszForward.__ilshift__cCs
d|_|Sr�rr�ryryrzrSszForward.leaveWhitespacecCs$|js d|_|jdk	r |j��|Sr�)r�rSr�r�ryryrzr�Ws


zForward.streamlinecCs>||kr0|dd�|g}|jdk	r0|j�|�|�g�dSr�r�r�ryryrzr^s

zForward.validatecCsVt|d�r|jS|jjdSz|jdk	r4t|j�}nd}W5|j|_X|jjd|S)Nr�z: ...�Nonez: )r�r�rmr�Z_revertClass�_ForwardNoRecurserSr�)r�Z	retStringryryrzr�es


zForward.__str__cs.|jdk	rtt|���St�}||K}|SdSr�)rSrrr�r1rryrzr�vs

zForward.copy)N)
r�r�r�r�r�r�r�rr�rr�r�r1ryryrrzr-s
c@seZdZdd�ZdS)r�cCsdS)Nrcryr�ryryrzr�sz_ForwardNoRecurse.__str__N)r�r�r�r�ryryryrzr�~sr�cs"eZdZdZd�fdd�	Z�ZS)r/zQ
    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
    Fcstt|��|�d|_dSr�)rr/r�r}r�rryrzr��szTokenConverter.__init__)Fr4ryryrrzr/�scs6eZdZdZd
�fdd�	Z�fdd�Zdd	�Z�ZS)ra�
    Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the input string;
    this can be disabled by specifying C{'adjacent=False'} in the constructor.

    Example::
        real = Word(nums) + '.' + Word(nums)
        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
        # will also erroneously match the following
        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']

        real = Combine(Word(nums) + '.' + Word(nums))
        print(real.parseString('3.1416')) # -> ['3.1416']
        # no match when there are internal spaces
        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
    r�Tcs8tt|��|�|r|��||_d|_||_d|_dSr�)rrr�r�adjacentr~�
joinStringr�)r�rSr�r�rryrzr��szCombine.__init__cs(|jrt�||�ntt|��|�|Sr�)r�r&rrrr"rryrzr�szCombine.ignorecCsP|��}|dd�=|td�|�|j��g|jd�7}|jrH|��rH|gS|SdS)Nr�)r�)r�r$r�r(r�r�r|r
)r�rRr�r�ZretToksryryrzr��s
"zCombine.postParse)r�T)r�r�r�r�r�rr�r1ryryrrzr�s
cs(eZdZdZ�fdd�Zdd�Z�ZS)ra�
    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.

    Example::
        ident = Word(alphas)
        num = Word(nums)
        term = ident | num
        func = ident + Optional(delimitedList(term))
        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']

        func = ident + Group(Optional(delimitedList(term)))
        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
    cstt|��|�d|_dSr�)rrr�r}r�rryrzr��szGroup.__init__cCs|gSr�ryr�ryryrzr��szGroup.postParse�r�r�r�r�r�r�r1ryryrrzr�s
cs(eZdZdZ�fdd�Zdd�Z�ZS)r
aW
    Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as an item key.

    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        
        # print attributes as plain groups
        print(OneOrMore(attr_expr).parseString(text).dump())
        
        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
        print(result.dump())
        
        # access named fields as dict entries, or output as dict
        print(result['shape'])        
        print(result.asDict())
    prints::
        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']

        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
    See more examples at L{ParseResults} of accessing fields by results name.
    cstt|��|�d|_dSr�)rr
r�r}r�rryrzr��sz
Dict.__init__cCs�t|�D]�\}}t|�dkrq|d}t|t�r@t|d���}t|�dkr\td|�||<qt|�dkr�t|dt�s�t|d|�||<q|��}|d=t|�dks�t|t�r�|�	�r�t||�||<qt|d|�||<q|j
r�|gS|SdS)Nrr�r�rs)r�r�r}rvr�r�r�r$r�r
r|)r�rRr�r�r��tokZikeyZ	dictvalueryryrzr��s$
zDict.postParser�ryryrrzr
�s#c@s eZdZdZdd�Zdd�ZdS)r-aV
    Converter for ignoring the results of a parsed expression.

    Example::
        source = "a, b, c,d"
        wd = Word(alphas)
        wd_list1 = wd + ZeroOrMore(',' + wd)
        print(wd_list1.parseString(source))

        # often, delimiters that are useful during parsing are just in the
        # way afterward - use Suppress to keep them out of the parsed output
        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
        print(wd_list2.parseString(source))
    prints::
        ['a', ',', 'b', ',', 'c', ',', 'd']
        ['a', 'b', 'c', 'd']
    (See also L{delimitedList}.)
    cCsgSr�ryr�ryryrzr�szSuppress.postParsecCs|Sr�ryr�ryryrzr
"szSuppress.suppressN)r�r�r�r�r�r
ryryryrzr-sc@s(eZdZdZdd�Zdd�Zdd�ZdS)	rzI
    Wrapper for parse actions, to ensure they are only called once.
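    An illustrative sketch; the wrapped parse action runs for the first match only, until C{reset()} is called::
        mark_first = OnlyOnce(lambda t: ['FIRST'] + t.asList())
        wd = Word(alphas).setParseAction(mark_first)
        print(wd.parseString("alpha"))   # -> ['FIRST', 'alpha']
        mark_first.reset()               # allow the action to fire on a later parse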
    cCst|�|_d|_dSr�)rr�callable�called)r�Z
methodCallryryrzr�*s
zOnlyOnce.__init__cCs.|js|�|||�}d|_|St||d��dS)NTr�)r�r�r!)r�r�r[rxrNryryrzr	-s
zOnlyOnce.__call__cCs
d|_dSr�)r�r�ryryrz�reset3szOnlyOnce.resetN)r�r�r�r�r�r	r�ryryryrzr&scs:t����fdd�}z�j|_Wntk
r4YnX|S)at
    Decorator for debugging parse actions. 
    
    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.

    Example::
        wd = Word(alphas)

        @traceParseAction
        def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))

        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
    prints::
        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
        <<leaving remove_duplicate_chars (ret: 'dfjkls')
        ['dfjkls']
    c
s��j}|dd�\}}}t|�dkr8|djjd|}tj�d|t||�||f�z�|�}Wn8tk
r�}ztj�d||f��W5d}~XYnXtj�d||f�|S)Nr^rqr�.z">>entering %s(line: '%s', %d, %r)
z<<leaving %s (exception: %s)
z<<leaving %s (ret: %r)
)r�r�rmr��stderr�writerIrp)ZpaArgsZthisFuncr�r[rxr�rX�rryrz�zLsztraceParseAction.<locals>.z)rrr�r�)rr�ryr�rzrd6s
�,FcCs`t|�dt|�dt|�d}|rBt|t||���|�S|tt|�|��|�SdS)a�
    Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace, and
    comments, but this can be overridden by passing C{combine=True} in the constructor.
    If C{combine} is set to C{True}, the matching tokens are returned as a single token
    string, with the delimiters included; otherwise, the matching tokens are returned
    as a list of tokens, with the delimiters suppressed.

    Example::
        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
    z [r(r�N)r�rr4r�r-)rSZdelim�combineZdlNameryryrzrBbs
$csjt����fdd�}|dkr0tt��dd��}n|��}|�d�|j|dd�|��d	t��d
�S)a:
    Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
    
    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.

    Example::
        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']

        # in this parser, the leading integer value is given in binary,
        # '10' indicating that 2 values are in the array
        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
    cs.|d}�|r tt�g|��p&tt�>gSr�)rrrE)r�r[rxr��Z	arrayExprrSryrz�countFieldParseAction�s"z+countedArray.<locals>.countFieldParseActionNcSst|d�Sr�)rvrwryryrzr{�r|zcountedArray.<locals>.<lambda>ZarrayLenT�r�z(len) rc)rr1rTr�r�r�r�r�)rSZintExprr�ryr�rzr>us
cCs6g}|D](}t|t�r&|�t|��q|�|�q|Sr�)r}r�rr�r)�Lr�r�ryryrzr��s
r�cs6t���fdd�}|j|dd���dt|���S)a*
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
    If this is not desired, use C{matchPreviousExpr}.
    Do I{not} use with packrat parsing enabled.
    csP|rBt|�dkr�|d>qLt|���}�tdd�|D��>n
�t�>dS)Nr�rcss|]}t|�VqdSr�)r�r�Zttryryrzr��szDmatchPreviousLiteral.<locals>.copyTokenToRepeater.<locals>.<genexpr>)r�r�r�rr)r�r[rxZtflat�Zrepryrz�copyTokenToRepeater�sz1matchPreviousLiteral.<locals>.copyTokenToRepeaterTr��(prev) )rr�r�r�)rSr�ryr�rzrQ�s


csFt��|��}�|K��fdd�}|j|dd���dt|���S)aS
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do I{not} use with packrat parsing enabled.
    cs*t|�����fdd�}�j|dd�dS)Ncs$t|���}|�kr tddd��dS)Nr�r)r�r�r!)r�r[rxZtheseTokens�ZmatchTokensryrz�mustMatchTheseTokens�szLmatchPreviousExpr.<locals>.copyTokenToRepeater.<locals>.mustMatchTheseTokensTr�)r�r�r�)r�r[rxr�r�r�rzr��sz.matchPreviousExpr.<locals>.copyTokenToRepeaterTr�r�)rr�r�r�r�)rSZe2r�ryr�rzrP�scCs:dD]}|�|t|�}q|�dd�}|�dd�}t|�S)Nz\^-]r2r'r~r�)r��_bslashr�)r�r�ryryrzrY�s
rYTc
s�|rdd�}dd�}t�ndd�}dd�}t�g}t|t�rF|��}n$t|t�rZt|�}ntjdt	dd�|stt
�Sd	}|t|�d
k�r||}t||d
d��D]R\}}	||	|�r�|||d
=qxq�|||	�r�|||d
=|�
||	�|	}qxq�|d
7}qx|�s�|�r�zlt|�td�|��k�rTtd
d�dd�|D����d�|��WStd�dd�|D����d�|��WSWn&tk
�r�tjdt	dd�YnXt�fdd�|D���d�|��S)a�
    Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a C{L{MatchFirst}} for best performance.

    Parameters:
     - strs - a string of space-delimited literals, or a collection of string literals
     - caseless - (default=C{False}) - treat all literals as caseless
     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
          if creating a C{Regex} raises an exception)

    Example::
        comp_oper = oneOf("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
    prints::
        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    cSs|��|��kSr�)r@�r�bryryrzr{�r|zoneOf.<locals>.<lambda>cSs|���|���Sr�)r@r<r�ryryrzr{�r|cSs||kSr�ryr�ryryrzr{�r|cSs
|�|�Sr�)r<r�ryryrzr{�r|z6Invalid argument to oneOf, expected string or iterablersr�rr�Nr�z[%s]css|]}t|�VqdSr�)rY�r�Zsymryryrzr�szoneOf.<locals>.<genexpr>r��|css|]}t�|�VqdSr�)r�r[r�ryryrzr�sz7Exception creating Regex for oneOf, building MatchFirstc3s|]}�|�VqdSr�ryr��ZparseElementClassryrzr�$s)r
rr}r�r�rr�r�r�r�rr�r�rr�r)r�rpr)
Zstrsr?ZuseRegexZisequalZmasksZsymbolsr�Zcurr�rryr�rzrU�sT



�


**�cCsttt||���S)a�
    Helper to easily and clearly define a dictionary by specifying the respective patterns
    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
    in the proper order.  The key pattern can include delimiting markers or punctuation,
    as long as they are suppressed, thereby leaving the significant key text.  The value
    pattern can include named results, so that the C{Dict} results can include named token
    fields.

    Example::
        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        print(OneOrMore(attr_expr).parseString(text).dump())
        
        attr_label = label
        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)

        # similar to Dict, but simpler call format
        result = dictOf(attr_label, attr_value).parseString(text)
        print(result.dump())
        print(result['shape'])
        print(result.shape)  # object attribute access works too
        print(result.asDict())
    prints::
        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        SQUARE
        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
    )r
r4r)rr�ryryrzrC&s!cCs^t��dd��}|��}d|_|d�||d�}|r@dd�}ndd�}|�|�|j|_|S)	a�
    Helper to return the original, untokenized text for a given expression.  Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns a string containing the original parsed text.
       
    If the optional C{asString} argument is passed as C{False}, then the return value is a 
    C{L{ParseResults}} containing any results names that were originally matched, and a 
    single token containing the original matched text from the input string.  So if 
    the expression passed to C{L{originalTextFor}} contains expressions with defined
    results names, you must set C{asString} to C{False} if you want to preserve those
    results name values.

    Example::
        src = "this is test <b> bold <i>text</i> </b> normal text "
        for tag in ("b","i"):
            opener,closer = makeHTMLTags(tag)
            patt = originalTextFor(opener + SkipTo(closer) + closer)
            print(patt.searchString(src)[0])
    prints::
        ['<b> bold <i>text</i> </b>']
        ['<i>text</i>']
    cSs|Sr�ry)r�r�rxryryrzr{ar|z!originalTextFor.<locals>.<lambda>F�_original_start�
_original_endcSs||j|j�Sr�)r�r�rZryryrzr{fr|cSs&||�d�|�d��g|dd�<dS)Nr�r�)rrZryryrz�extractTexthsz$originalTextFor.<locals>.extractText)rr�r�r�r�)rSZasStringZ	locMarkerZendlocMarker�	matchExprr�ryryrzriIs

cCst|��dd��S)zp
    Helper to undo pyparsing's default grouping of And expressions, even
    if all but one are non-empty.
    cSs|dSr�ryrwryryrzr{sr|zungroup.<locals>.<lambda>)r/r�)rSryryrzrjnscCs4t��dd��}t|d�|d�|����d��S)a�
    Helper to decorate a returned token with its starting and ending locations in the input string.
    This helper adds the following results names:
     - locn_start = location where matched expression begins
     - locn_end = location where matched expression ends
     - value = the actual parsed results

    Be careful if the input text contains C{<TAB>} characters, you may want to call
    C{L{ParserElement.parseWithTabs}}

    Example::
        wd = Word(alphas)
        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
            print(match)
    prints::
        [[0, 'ljsdf', 5]]
        [[8, 'lksdjjf', 15]]
        [[18, 'lkkjj', 23]]
    cSs|Sr�ryrZryryrzr{�r|zlocatedExpr.<locals>.<lambda>Z
locn_startr�Zlocn_end)rr�rr�r)rSZlocatorryryrzrlusz\[]-*.$+^?()~ �r_cCs|ddSr�ryrZryryrzr{�r|r{z\\0?[xX][0-9a-fA-F]+cCstt|d�d�d��S)Nrz\0x�)�unichrrv�lstriprZryryrzr{�r|z	\\0[0-7]+cCstt|ddd�d��S)Nrr��)r�rvrZryryrzr{�r|z\]r�r$r)�negate�bodyr'csFdd��z"d��fdd�t�|�jD��WStk
r@YdSXdS)a�
    Helper to easily define string ranges for use in Word construction.  Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]")   -> "0123456789"
        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be:
     - a single character
     - an escaped character with a leading backslash (such as C{\-} or C{\]})
     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 
         (C{\0x##} is also supported for backwards compatibility) 
     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
    cSs<t|t�s|Sd�dd�tt|d�t|d�d�D��S)Nr�css|]}t|�VqdSr�)r�r�ryryrzr��sz+srange.<locals>.<lambda>.<locals>.<genexpr>rr�)r}r$r�r��ord)�pryryrzr{�r|zsrange.<locals>.<lambda>r�c3s|]}�|�VqdSr�ry)r��part�Z	_expandedryrzr��szsrange.<locals>.<genexpr>N)r��_reBracketExprr�r�rprdryr�rzra�s
"cs�fdd�}|S)zt
    Helper method for defining parse actions that require matching at a specific
    column in the input text.
    cs"t||��krt||d���dS)Nzmatched token not at column %dr�)rNZlocnrVr�ryrz�	verifyCol�sz!matchOnlyAtCol.<locals>.verifyColry)r�r�ryr�rzrO�scs�fdd�S)a�
    Helper method for common parse actions that simply return a literal value.  Especially
    useful when used with C{L{transformString<ParserElement.transformString>}()}.

    Example::
        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
        term = na | num
        
        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
    cs�gSr�ryrZ�ZreplStrryrzr{�r|zreplaceWith.<locals>.<lambda>ryr�ryr�rzr^�scCs|ddd�S)a
    Helper parse action for removing quotation marks from parsed quoted strings.

    Example::
        # by default, quotation marks are included in parsed results
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]

        # use removeQuotes to strip quotation marks from parsed results
        quotedString.setParseAction(removeQuotes)
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
    rr�rtryrZryryrzr\�scsN��fdd�}zt�dt�d�j�}Wntk
rBt��}YnX||_|S)aG
    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
    args are passed, they are forwarded to the given function as additional arguments after
    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
    parsed data to an integer using base 16.

    Example (compare the last example to the one in L{ParserElement.transformString})::
        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
        hex_ints.runTests('''
            00 11 22 aa FF 0a 0d 1a
            ''')
        
        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
        OneOrMore(upperword).runTests('''
            my kingdom for a horse
            ''')

        wd = Word(alphas).setParseAction(tokenMap(str.title))
        OneOrMore(wd).setParseAction(' '.join).runTests('''
            now is the winter of our discontent made glorious summer by this sun of york
            ''')
    prints::
        00 11 22 aa FF 0a 0d 1a
        [0, 17, 34, 170, 255, 10, 13, 26]

        my kingdom for a horse
        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']

        now is the winter of our discontent made glorious summer by this sun of york
        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
    cs��fdd�|D�S)Ncsg|]}�|f����qSryry)r�Ztokn�r�r\ryrzr��sz(tokenMap.<locals>.pa.<locals>.<listcomp>ryrZr�ryrzr��sztokenMap.<locals>.par�rm)ror�rpr~)r\r�r�rqryr�rzro�s 
�cCst|���Sr��r�r@rwryryrzr{r|cCst|���Sr��r��lowerrwryryrzr{r|c	Cs�t|t�r|}t||d�}n|j}tttd�}|r�t���	t
�}td�|d�tt
t|td�|���tddgd��d	��	d
d��td�}n�d
�dd�tD��}t���	t
�t|�B}td�|d�tt
t|�	t�ttd�|����tddgd��d	��	dd��td�}ttd�|d�}|�dd
�|�dd��������d|�}|�dd
�|�dd��������d|�}||_||_||fS)zRInternal helper to construct opening and closing tag expressions, given a tag namerEz_-:r5�tag�=�/F�rrEcSs|ddkS�Nrr�ryrZryryrzr{r|z_makeTags.<locals>.<lambda>r6r�css|]}|dkr|VqdS)r6Nryr�ryryrzr�sz_makeTags.<locals>.<genexpr>cSs|ddkSr�ryrZryryrzr{r|r7rJ�:r(z<%s>r`z</%s>)r}r�rr�r1r6r5r@r�r�r\r-r
r4rrr�r�rXr[rDr�_Lr��titler�r�r�)�tagStrZxmlZresnameZtagAttrNameZtagAttrValueZopenTagZprintablesLessRAbrackZcloseTagryryrz�	_makeTagss>
�������..rcCs
t|d�S)a 
    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.

    Example::
        text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
        a,a_end = makeHTMLTags("A")
        link_expr = a + SkipTo(a_end)("link_text") + a_end
        
        for link in link_expr.searchString(text):
            # attributes in the <A> tag (like "href" shown here) are also accessible as named results
            print(link.link_text, '->', link.href)
    prints::
        pyparsing -> http://pyparsing.wikispaces.com
    F�r�r�ryryrzrM(scCs
t|d�S)z�
    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
    tags only in the given upper/lower case.

    Example: similar to L{makeHTMLTags}
    TrrryryrzrN;scs8|r|dd��n|���dd��D���fdd�}|S)a<
    Helper to create a validating parse action to be used with start tags created
    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
    with a required attribute value, to avoid false matches on common tags such as
    C{<TD>} or C{<DIV>}.

    Call C{withAttribute} with a series of attribute names and values. Specify the list
    of filter attributes names and values as:
     - keyword arguments, as in C{(align="right")}, or
     - as an explicit dict with C{**} operator, when an attribute name is also a Python
          reserved word, as in C{**{"class":"Customer", "align":"right"}}
     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
    For attribute names with a namespace prefix, you must use the second form.  Attribute
    names are matched insensitive to upper/lower case.
       
    If just testing for C{class} (with or without a namespace), use C{L{withClass}}.

    To verify that the attribute exists, but without specifying a value, pass
    C{withAttribute.ANY_VALUE} as the value.

    Example::
        html = '''
            <div>
            Some text
            <div type="grid">1 4 0 1 0</div>
            <div type="graph">1,3 2,3 1,1</div>
            <div>this has no type</div>
            </div>
                
        '''
        div,div_end = makeHTMLTags("div")

        # only match div tag having a type attribute with value "grid"
        div_grid = div().setParseAction(withAttribute(type="grid"))
        grid_expr = div_grid + SkipTo(div | div_end)("body")
        for grid_header in grid_expr.searchString(html):
            print(grid_header.body)
        
        # construct a match with any div tag having a type attribute, regardless of the value
        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
        div_expr = div_any_type + SkipTo(div | div_end)("body")
        for div_header in div_expr.searchString(html):
            print(div_header.body)
    prints::
        1 4 0 1 0

        1 4 0 1 0
        1,3 2,3 1,1
    NcSsg|]\}}||f�qSryryr/ryryrzr�zsz!withAttribute.<locals>.<listcomp>csZ�D]P\}}||kr$t||d|��|tjkr|||krt||d||||f��qdS)Nzno matching attribute z+attribute '%s' has value '%s', must be '%s')r!rg�	ANY_VALUE)r�r[r�ZattrNameZ	attrValue�Zattrsryrzr�{s�zwithAttribute.<locals>.pa)r�)r�ZattrDictr�ryrrzrgDs2cCs|rd|nd}tf||i�S)a�
    Simplified version of C{L{withAttribute}} when matching on a div class - made
    difficult because C{class} is a reserved word in Python.

    Example::
        html = '''
            <div>
            Some text
            <div class="grid">1 4 0 1 0</div>
            <div class="graph">1,3 2,3 1,1</div>
            <div>this &lt;div&gt; has no class</div>
            </div>
                
        '''
        div,div_end = makeHTMLTags("div")
        div_grid = div().setParseAction(withClass("grid"))
        
        grid_expr = div_grid + SkipTo(div | div_end)("body")
        for grid_header in grid_expr.searchString(html):
            print(grid_header.body)
        
        div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
        div_expr = div_any_type + SkipTo(div | div_end)("body")
        for div_header in div_expr.searchString(html):
            print(div_header.body)
    prints::
        1 4 0 1 0

        1 4 0 1 0
        1,3 2,3 1,1
    z%s:class�class)rg)Z	classname�	namespaceZ	classattrryryrzrm�s �(rpcCs�t�}||||B}t|�D�]l\}}|ddd�\}}	}
}|	dkrPd|nd|}|	dkr�|dkstt|�dkr|td��|\}
}t��|�}|
tjk�r^|	d	kr�t||�t|t	|��}n�|	dk�r|dk	r�t|||�t|t	||��}nt||�t|t	|��}nD|	dk�rTt||
|||�t||
|||�}ntd
��n�|
tj
k�rB|	d	k�r�t|t��s�t|�}t|j
|�t||�}n�|	dk�r�|dk	�r�t|||�t|t	||��}nt||�t|t	|��}nD|	dk�r8t||
|||�t||
|||�}ntd
��ntd��|�rvt|ttf��rl|j|�n
|�|�||�|�|BK}|}q||K}|S)aD

    Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy.  Operators may be unary or
    binary, left- or right-associative.  Parse actions can also be attached
    to operator expressions. The generated parser will also recognize the use 
    of parentheses to override operator precedences (see example below).
    
    Note: if you define a deep operator list, you may see performance issues
    when using infixNotation. See L{ParserElement.enablePackrat} for a
    mechanism to potentially improve your parser performance.

    Parameters:
     - baseExpr - expression representing the most basic operand element of the nested
     - opList - list of tuples, one for each operator precedence level in the
      expression grammar; each tuple is of the form
      (opExpr, numTerms, rightLeftAssoc, parseAction), where:
       - opExpr is the pyparsing expression for the operator;
          may also be a string, which will be converted to a Literal;
          if numTerms is 3, opExpr is a tuple of two expressions, for the
          two operators separating the 3 terms
       - numTerms is the number of terms for this operator (must
          be 1, 2, or 3)
       - rightLeftAssoc indicates whether the operator is
          right- or left-associative, using the pyparsing-defined
          constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
       - parseAction is the parse action to be associated with
          expressions matching this operator expression (the
          parse action tuple member may be omitted); if the parse action
          is passed a tuple or list of functions, this is equivalent to
          calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
     - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
     - rpar - expression for matching right-parentheses (default=C{Suppress(')')})

    Example::
        # simple example of four-function arithmetic with ints and variable names
        integer = pyparsing_common.signed_integer
        varname = pyparsing_common.identifier 
        
        arith_expr = infixNotation(integer | varname,
            [
            ('-', 1, opAssoc.RIGHT),
            (oneOf('* /'), 2, opAssoc.LEFT),
            (oneOf('+ -'), 2, opAssoc.LEFT),
            ])
        
        arith_expr.runTests('''
            5+3*6
            (5+3)*6
            -2--11
            ''', fullDump=False)
    prints::
        5+3*6
        [[5, '+', [3, '*', 6]]]

        (5+3)*6
        [[[5, '+', 3], '*', 6]]

        -2--11
        [[['-', 2], '-', ['-', 11]]]
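    A minimal sketch of a three-term (ternary) operator level, assuming a C-style
    conditional written with C{'?'} and C{':'} (the operand expressions below are
    illustrative)::
        from pyparsing import infixNotation, opAssoc, oneOf, pyparsing_common

        integer = pyparsing_common.signed_integer
        varname = pyparsing_common.identifier
        operand = integer | varname

        cond_expr = infixNotation(operand,
            [
            (oneOf('* /'), 2, opAssoc.LEFT),
            (oneOf('+ -'), 2, opAssoc.LEFT),
            # numTerms=3: opExpr is a tuple of the two separating operators
            (('?', ':'), 3, opAssoc.RIGHT),
            ])
        print(cond_expr.parseString("a ? b+1 : c"))
        # -> [['a', '?', ['b', '+', 1], ':', 'c']]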
    r�Nrbrqz%s termz	%s%s termrsz@if numterms=3, opExpr must be a tuple or list of two expressionsr�z6operator must be unary (1), binary (2), or ternary (3)z2operator must indicate right or left associativity)rr�r�r�r�rV�LEFTrrr�RIGHTr}rrSr�r�r�)ZbaseExprZopListZlparZrparr�ZlastExprr�ZoperDefZopExprZarityZrightLeftAssocr�ZtermNameZopExpr1ZopExpr2ZthisExprr�ryryrzrk�sZ=
&
�



&
�

z4"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*�"z string enclosed in double quotesz4'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*�'z string enclosed in single quotesz*quotedString using single or double quotes�uzunicode string literalcCs�||krtd��|dk�r*t|t��r"t|t��r"t|�dkr�t|�dkr�|dk	r�tt|t||tjdd����	dd��}n$t
��t||tj��	dd��}nx|dk	r�tt|t|�t|�ttjdd����	dd��}n4ttt|�t|�ttjdd����	d	d��}ntd
��t
�}|dk	�rd|tt|�t||B|B�t|��K}n$|tt|�t||B�t|��K}|�d||f�|S)a~	
    Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).

    Parameters:
     - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
     - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
     - content - expression for items within the nested lists (default=C{None})
     - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})

    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.

    Use the C{ignoreExpr} argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression.  Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
    The default is L{quotedString}, but if no expressions are to be ignored,
    then pass C{None} for this argument.

    Example::
        data_type = oneOf("void int short long char float double")
        decl_data_type = Combine(data_type + Optional(Word('*')))
        ident = Word(alphas+'_', alphanums+'_')
        number = pyparsing_common.number
        arg = Group(decl_data_type + ident)
        LPAR,RPAR = map(Suppress, "()")

        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))

        c_function = (decl_data_type("type") 
                      + ident("name")
                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR 
                      + code_body("body"))
        c_function.ignore(cStyleComment)
        
        source_code = '''
            int is_odd(int x) { 
                return (x%2); 
            }
                
            int dec_to_hex(char hchar) { 
                if (hchar >= '0' && hchar <= '9') { 
                    return (ord(hchar)-ord('0')); 
                } else { 
                    return (10+ord(hchar)-ord('A'));
                } 
            }
        '''
        for func in c_function.searchString(source_code):
            print("%(name)s (%(type)s) args: %(args)s" % func)

    prints::
        is_odd (int) args: [['int', 'x']]
        dec_to_hex (int) args: [['char', 'hchar']]
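    With the default C{"("} and C{")"} delimiters and no content expression, whitespace-
    separated items are returned as nested lists; a minimal sketch::
        from pyparsing import nestedExpr

        print(nestedExpr().parseString("(a (b c) d)"))
        # -> [['a', ['b', 'c'], 'd']]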
    z.opening and closing strings cannot be the sameNr�r�cSs|d��Sr��r�rwryryrzr{gr|znestedExpr.<locals>.<lambda>cSs|d��Sr�r
rwryryrzr{jr|cSs|d��Sr�r
rwryryrzr{pr|cSs|d��Sr�r
rwryryrzr{tr|zOopening and closing arguments must be strings if no content expression is givenznested %s%s expression)r�r}r�r�rrrr&rsr�rEr�rrrr-r4r�)ZopenerZcloserZcontentr�r�ryryrzrR%sH:
���������
*$cs��fdd�}�fdd�}�fdd�}tt��d����}t�t��|��d�}t��|��d	�}t��|��d
�}	|r�tt|�|t|t|�t|��|	�}
n$tt|�t|t|�t|���}
|�	t
t��|
�d�S)a
	
    Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.

    Parameters:
     - blockStatementExpr - expression defining the syntax of a statement that
            is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
            (multiple statementWithIndentedBlock expressions within a single grammar
            should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond the
            current level; set to False for a block of left-most statements
            (default=C{True})

    A valid block must contain at least one C{blockStatement}.

    Example::
        data = '''
        def A(z):
          A1
          B = 100
          G = A2
          A2
          A3
        B
        def BB(a,b,c):
          BB1
          def BBA():
            bba1
            bba2
            bba3
        C
        D
        def spam(x,y):
             def eggs(z):
                 pass
        '''


        indentStack = [1]
        stmt = Forward()

        identifier = Word(alphas, alphanums)
        funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
        func_body = indentedBlock(stmt, indentStack)
        funcDef = Group( funcDecl + func_body )

        rvalue = Forward()
        funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
        rvalue << (funcCall | identifier | Word(nums))
        assignment = Group(identifier + "=" + rvalue)
        stmt << ( funcDef | assignment | identifier )

        module_body = OneOrMore(stmt)

        parseTree = module_body.parseString(data)
        parseTree.pprint()
    prints::
        [['def',
          'A',
          ['(', 'z', ')'],
          ':',
          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
         'B',
         ['def',
          'BB',
          ['(', 'a', 'b', 'c', ')'],
          ':',
          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
         'C',
         'D',
         ['def',
          'spam',
          ['(', 'x', 'y', ')'],
          ':',
          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] 
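    As noted for the C{indentStack} parameter above, multiple indented-block expressions
    in one grammar must share the same stack; a minimal grammar-definition sketch (the
    statement forms are illustrative only)::
        from pyparsing import (indentedBlock, Keyword, Word, alphas, Forward,
                               Group, OneOrMore)

        indent_stack = [1]          # single stack shared by both block expressions
        stmt = Forward()
        if_stmt = Group(Keyword("if") + Word(alphas)("cond") + ":"
                        + indentedBlock(stmt, indent_stack))
        while_stmt = Group(Keyword("while") + Word(alphas)("cond") + ":"
                           + indentedBlock(stmt, indent_stack))
        stmt <<= if_stmt | while_stmt | Word(alphas)
        program = OneOrMore(stmt)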
    csN|t|�krdSt||�}|�dkrJ|�dkr>t||d��t||d��dS)Nrtzillegal nestingznot a peer entry)r�r;r#r!�r�r[rxZcurCol��indentStackryrz�checkPeerIndent�s
z&indentedBlock.<locals>.checkPeerIndentcs2t||�}|�dkr"��|�nt||d��dS)Nrtznot a subentry)r;rr!rrryrz�checkSubIndent�s
z%indentedBlock.<locals>.checkSubIndentcsN|t|�krdSt||�}�r6|�dkr6|�dksBt||d�����dS)Nrtr_znot an unindent)r�r;r!rrrryrz�
checkUnindent�s
z$indentedBlock.<locals>.checkUnindentz	 �INDENTr�ZUNINDENTzindented block)rrr
r
rr�r�rrrr�)ZblockStatementExprrr9rrrrErZPEERZUNDENTZsmExprryrrzrhs(N����z#[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]z[\0xa1-\0xbf\0xd7\0xf7]z_:zany tagzgt lt amp nbsp quot aposz><& "'z&(?P<entity>r�z);zcommon HTML entitycCst�|j�S)zRHelper parser action to replace common HTML entities with their special characters)�_htmlEntityMapr�Zentityrwryryrzr]�sz/\*(?:[^*]|\*(?!/))*z*/zC style commentz<!--[\s\S]*?-->zHTML commentz.*zrest of linez//(?:\\\n|[^\n])*z
// commentzC++ style commentz#.*zPython style commentrO� 	�	commaItemr�c@s�eZdZdZee�Zee�Ze	e
��d��e�Z
e	e��d��eed��Zed��d��e�Ze��e�de��e��d�Ze�d	d
��eeeed���e�B�d�Ze�e�ed
��d��e�Zed��d��e�ZeeBeB��Zed��d��e�Ze	eded��d�Zed��d�Z ed��d�Z!e!de!d�d�Z"ee!de!d�dee!de!d��d�Z#e#�$dd
��d e �d!�Z%e&e"e%Be#B�d"���d"�Z'ed#��d$�Z(e)d=d&d'��Z*e)d>d)d*��Z+ed+��d,�Z,ed-��d.�Z-ed/��d0�Z.e/��e0��BZ1e)d1d2��Z2e&e3e4d3�e5�e	e6d3d4�ee7d5�������d6�Z8e9ee:�;�e8Bd7d8���d9�Z<e)ed:d
���Z=e)ed;d
���Z>d<S)?rpa�

    Here are some common low-level expressions that may be useful in jump-starting parser development:
     - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
     - common L{programming identifiers<identifier>}
     - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
     - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
     - L{UUID<uuid>}
     - L{comma-separated list<comma_separated_list>}
    Parse actions:
     - C{L{convertToInteger}}
     - C{L{convertToFloat}}
     - C{L{convertToDate}}
     - C{L{convertToDatetime}}
     - C{L{stripHTMLTags}}
     - C{L{upcaseTokens}}
     - C{L{downcaseTokens}}

    Example::
        pyparsing_common.number.runTests('''
            # any int or real number, returned as the appropriate type
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.fnumber.runTests('''
            # any int or real number, returned as float
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.hex_integer.runTests('''
            # hex numbers
            100
            FF
            ''')

        pyparsing_common.fraction.runTests('''
            # fractions
            1/2
            -3/4
            ''')

        pyparsing_common.mixed_integer.runTests('''
            # mixed fractions
            1
            1/2
            -3/4
            1-3/4
            ''')

        import uuid
        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
        pyparsing_common.uuid.runTests('''
            # uuid
            12345678-1234-5678-1234-567812345678
            ''')
    prints::
        # any int or real number, returned as the appropriate type
        100
        [100]

        -100
        [-100]

        +100
        [100]

        3.14159
        [3.14159]

        6.02e23
        [6.02e+23]

        1e-12
        [1e-12]

        # any int or real number, returned as float
        100
        [100.0]

        -100
        [-100.0]

        +100
        [100.0]

        3.14159
        [3.14159]

        6.02e23
        [6.02e+23]

        1e-12
        [1e-12]

        # hex numbers
        100
        [256]

        FF
        [255]

        # fractions
        1/2
        [0.5]

        -3/4
        [-0.75]

        # mixed fractions
        1
        [1]

        1/2
        [0.5]

        -3/4
        [-0.75]

        1-3/4
        [1.75]

        # uuid
        12345678-1234-5678-1234-567812345678
        [UUID('12345678-1234-5678-1234-567812345678')]
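    A minimal sketch of the comma-separated list expression mentioned in the feature list
    above (the input text is illustrative)::
        from pyparsing import pyparsing_common

        print(pyparsing_common.comma_separated_list.parseString('a, "b, c", 100'))
        # -> ['a', '"b, c"', '100']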
    �integerzhex integerr�z[+-]?\d+zsigned integerr��fractioncCs|d|dS)Nrrtryrwryryrzr{�r|zpyparsing_common.<lambda>r�z"fraction or mixed integer-fractionz
[+-]?\d+\.\d*zreal numberz+[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)z$real number with scientific notationz[+-]?\d+\.?\d*([eE][+-]?\d+)?�fnumberr��
identifierzK(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}zIPv4 addressz[0-9a-fA-F]{1,4}�hex_integerr��zfull IPv6 address)rrhz::zshort IPv6 addresscCstdd�|D��dkS)Ncss|]}tj�|�rdVqdSr3)rp�
_ipv6_partr�r�ryryrzr��sz,pyparsing_common.<lambda>.<locals>.<genexpr>r�)r�rwryryrzr{�r|z::ffff:zmixed IPv6 addresszIPv6 addressz:[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}zMAC address�%Y-%m-%dcs�fdd�}|S)a�
        Helper to create a parse action for converting a parsed date string to a Python datetime.date

        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})

        Example::
            date_expr = pyparsing_common.iso8601_date.copy()
            date_expr.setParseAction(pyparsing_common.convertToDate())
            print(date_expr.parseString("1999-12-31"))
        prints::
            [datetime.date(1999, 12, 31)]
        c
sNzt�|d����WStk
rH}zt||t|���W5d}~XYnXdSr�)r�strptime�dater�r!r~�r�r[rxZve��fmtryrz�cvt_fn�sz.pyparsing_common.convertToDate.<locals>.cvt_fnry�r$r%ryr#rz�
convertToDate�szpyparsing_common.convertToDate�%Y-%m-%dT%H:%M:%S.%fcs�fdd�}|S)a
        Helper to create a parse action for converting a parsed datetime string to a Python datetime.datetime

        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})

        Example::
            dt_expr = pyparsing_common.iso8601_datetime.copy()
            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
            print(dt_expr.parseString("1999-12-31T23:59:59.999"))
        prints::
            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
        c
sJzt�|d��WStk
rD}zt||t|���W5d}~XYnXdSr�)rr r�r!r~r"r#ryrzr%�sz2pyparsing_common.convertToDatetime.<locals>.cvt_fnryr&ryr#rz�convertToDatetime�sz"pyparsing_common.convertToDatetimez7(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?zISO8601 datez�(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?zISO8601 datetimez2[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}�UUIDcCstj�|d�S)a
        Parse action to remove HTML tags from web page HTML source

        Example::
            # strip HTML links from normal text 
            text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
            td,td_end = makeHTMLTags("TD")
            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
            
            print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
        r)rp�_html_stripperr�)r�r[r�ryryrz�
stripHTMLTagss
zpyparsing_common.stripHTMLTagsr�rOrrr�r�zcomma separated listcCst|���Sr�r�rwryryrzr{"r|cCst|���Sr�r�rwryryrzr{%r|N)r)r()?r�r�r�r�rorvZconvertToInteger�floatZconvertToFloatr1rTr�r�rrFrr)Zsigned_integerrr�rr
Z
mixed_integerr��realZsci_realr��numberrr6r5rZipv4_addressrZ_full_ipv6_addressZ_short_ipv6_addressr�Z_mixed_ipv6_addressrZipv6_addressZmac_addressr/r'r)Ziso8601_dateZiso8601_datetime�uuidr9r8r+r,rrrrXr0�
_commasepitemrBr[r�Zcomma_separated_listrfrDryryryrzrpsV""
2
 
���__main__Zselect�fromr=r�)r��columnsr�ZtablesZcommandaK
        # '*' as column list and dotted table name
        select * from SYS.XYZZY

        # caseless match on "SELECT", and casts back to "select"
        SELECT * from XYZZY, ABC

        # list of column names, and mixed case SELECT keyword
        Select AA,BB,CC from Sys.dual

        # multiple tables
        Select A, B, C from Sys.dual, Table2

        # invalid SELECT keyword - should fail
        Xelect A, B, C from Sys.dual

        # incomplete command - should fail
        Select

        # invalid column name - should fail
        Select ^^^ frox Sys.dual

        z]
        100
        -100
        +100
        3.14159
        6.02e23
        1e-12
        z 
        100
        FF
        z6
        12345678-1234-5678-1234-567812345678
        )rs)r�F)N)FT)T)r�)T)�r��__version__Z__versionTime__�
__author__r��weakrefrr�r�r�r�r�rjr�rFrcr�r�_threadr�ImportErrorZ	threadingZcollections.abcrrrr�Zordereddict�__all__r��version_inforbr0�maxsizer0r~r��chrr�r�r�r�r@�reversedr�r�rBr�r]r^rnZmaxintZxranger�Z__builtin__r�Zfnamerror�r�r�r�r�r�Zascii_uppercaseZascii_lowercaser6rTrFr5r�r�Z	printablerXrprr!r#r%r(r�r$�registerr;rLrIrTrWrYrSrrr&r.rrrr�rwrr
r	rnr1r)r'rr0r�rrrr,r+r3r2r"rrrrr rrr�rr4r�r�rr*rr�r/rrr
r-rrdrBr>r�rQrPrYrUrCrirjrlr�rErKrJrcrbr�Z_escapedPuncZ_escapedHexCharZ_escapedOctCharZ_singleCharZ
_charRanger�r�rarOr^r\rorfrDrrMrNrgrrmrVrr	rkrWr@r`r[rerRrhr7rYr9r8r�r�rrr=r]r:rGrr_rAr?rHrZr�r1r<rpr�ZselectTokenZ	fromTokenZidentZ
columnNameZcolumnNameListZ
columnSpecZ	tableNameZ
tableNameListZ	simpleSQLr.r/rrr0r*ryryryrz�<module>s��4�
8



@v&A= I
G3pLOD|M &#@sQ,A,	I#%0
,	?#p
��Zr

 (
 
���� 


"
	PK�V[:��-n_n_,_vendor/__pycache__/six.cpython-38.opt-1.pycnu�[���U

�Qab�u�A@sRdZddlmZddlZddlZddlZddlZddlZdZdZ	ej
ddkZej
ddkZej
dd�dkZ
er�efZefZefZeZeZejZn~efZeefZeejfZeZeZej�d	�r�ed
�ZnHGdd�de�Z ze!e ��Wne"k
�red
�ZYn
Xed
�Z[ dd�Z#dd�Z$Gdd�de�Z%Gdd�de%�Z&Gdd�dej'�Z(Gdd�de%�Z)Gdd�de�Z*e*e+�Z,Gdd�de(�Z-e)dddd �e)d!d"d#d$d!�e)d%d"d"d&d%�e)d'd(d#d)d'�e)d*d(d+�e)d,d"d#d-d,�e)d.d/d/d0d.�e)d1d/d/d.d1�e)d2d(d#d3d2�e)d4d(e
�rd5nd6d7�e)d8d(d9�e)d:d;d<d=�e)d d d�e)d>d>d?�e)d@d@d?�e)dAdAd?�e)d3d(d#d3d2�e)dBd"d#dCdB�e)dDd"d"dEdD�e&d#d(�e&dFdG�e&dHdI�e&dJdKdL�e&dMdNdM�e&dOdPdQ�e&dRdSdT�e&dUdVdW�e&dXdYdZ�e&d[d\d]�e&d^d_d`�e&dadbdc�e&dddedf�e&dgdhdi�e&djdjdk�e&dldldk�e&dmdmdk�e&dndndo�e&dpdq�e&drds�e&dtdu�e&dvdwdv�e&dxdy�e&dzd{d|�e&d}d~d�e&d�d�d��e&d�d�d��e&d�d�d��e&d�d�d��e&d�d�d��e&d�d�d��e&d�d�d��e&d�d�d��e&d�d�d�e&d�d�d��e&d�d�d��e&d�d�d��e&d�e+d�d��e&d�e+d�d��e&d�e+d�e+d��e&d�d�d��e&d�d�d��e&d�d�d��g>Z.ejd�k�rRe.e&d�d��g7Z.e.D]2Z/e0e-e/j1e/�e2e/e&��rVe,�3e/d�e/j1��qV[/e.e-_.e-e+d��Z4e,�3e4d��Gd�d��d�e(�Z5e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d=d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��e)d�d�d��gZ6e6D]Z/e0e5e/j1e/��q�[/e6e5_.e,�3e5e+d��d�dҡGd�dԄd�e(�Z7e)d�d�d��e)d�d�d��e)d�d�d��gZ8e8D]Z/e0e7e/j1e/��q[/e8e7_.e,�3e7e+d��d�dۡGd�d݄d�e(�Z9e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃e)d�d�d߃g!Z:e:D]Z/e0e9e/j1e/��q�[/e:e9_.e,�3e9e+�d��d�d�G�d�d��de(�Z;e)�dd��d�e)�dd��d�e)�dd��d�e)�d	d��d�gZ<e<D]Z/e0e;e/j1e/��q8[/e<e;_.e,�3e;e+�d
��d�d�G�d
�d��de(�Z=e)�dd�d��gZ>e>D]Z/e0e=e/j1e/��q�[/e>e=_.e,�3e=e+�d��d�d�G�d�d��dej'�Z?e,�3e?e+d���d��d�d�Z@�d�d�ZAe�	rH�dZB�dZC�dZD�dZE�dZF�dZGn$�d ZB�d!ZC�d"ZD�d#ZE�d$ZF�d%ZGzeHZIWn"eJk
�	r��d&�d'�ZIYnXeIZHzeKZKWn"eJk
�	r��d(�d)�ZKYnXe�	r�d*�d+�ZLejMZN�d,�d-�ZOeZPn>�d.�d+�ZL�d/�d0�ZN�d1�d-�ZOG�d2�d3��d3e�ZPeKZKe#eL�d4�e�QeB�ZRe�QeC�ZSe�QeD�ZTe�QeE�ZUe�QeF�ZVe�QeG�ZWe�
rԐd5�d6�ZX�d7�d8�ZY�d9�d:�ZZ�d;�d<�Z[e�\�d=�Z]e�\�d>�Z^e�\�d?�Z_nT�d@�d6�ZX�dA�d8�ZY�dB�d:�ZZ�dC�d<�Z[e�\�dD�Z]e�\�dE�Z^e�\�dF�Z_e#eX�dG�e#eY�dH�e#eZ�dI�e#e[�dJ�e�r�dK�dL�Z`�dM�dN�ZaebZcddldZded�e�dO�jfZg[de�hd�ZiejjZkelZmddlnZnenjoZoenjpZp�dPZqej
�dQ�dQk�r�dRZr�dSZsn�dTZr�dUZsnj�dV�dL�Z`�dW�dN�ZaecZcebZg�dX�dY�Zi�dZ�d[�Zke�tejuev�ZmddloZoeojoZoZp�d\Zq�dRZr�dSZse#e`�d]�e#ea�d^��d_�dP�Zw�d`�dT�Zx�da�dU�Zye�r�eze4j{�db�Z|�d|�dc�dd�Z}n�d}�de�df�Z|e|�dg�ej
dd��dhk�r�e|�di�n.ej
dd��dhk�
re|�dj�n�dk�dl�Z~eze4j{�dmd�Zedk�
rL�dn�do�Zej
dd��dpk�
rreZ��dq�do�Ze#e}�dr�ej
dd�dk�
r�ej�ej�f�ds�dt�Z�nej�Z��du�dv�Z��dw�dx�Z��dy�dz�Z�gZ�e+Z�e�����d{�dk	�
r�ge�_�ej��rBe�ej��D]4\Z�Z�ee��j+dk�re�j1e+k�rej�e�=�q>�q[�[�ej���e,�dS(~z6Utilities for writing code that runs on Python 2 and 3�)�absolute_importNz'Benjamin Peterson <benjamin@python.org>z1.10.0��)r��javai���c@seZdZdd�ZdS)�XcCsdS)Nl���selfrr�=/usr/lib/python3.8/site-packages/pkg_resources/_vendor/six.py�__len__>sz	X.__len__N)�__name__�
__module__�__qualname__rrrrrr<srl����cCs
||_dS)z Add documentation to a function.N)�__doc__)�func�docrrr�_add_docKsrcCst|�tj|S)z7Import module, returning the module after the last dot.)�
__import__�sys�modules��namerrr�_import_modulePsrc@seZdZdd�Zdd�ZdS)�
_LazyDescrcCs
||_dS�Nr�r
rrrr�__init__Xsz_LazyDescr.__init__cCsB|��}t||j|�zt|j|j�Wntk
r<YnX|Sr)�_resolve�setattrr�delattr�	__class__�AttributeError)r
�obj�tp�resultrrr�__get__[sz_LazyDescr.__get__N)r
rrrr&rrrrrVsrcs.eZdZd�fdd�	Zdd�Zdd�Z�ZS)	�MovedModuleNcs2tt|��|�tr(|dkr |}||_n||_dSr)�superr'r�PY3�mod)r
r�old�new�r!rrriszMovedModule.__init__cCs
t|j�Sr)rr*r	rrrrrszMovedModule._resolvecCs"|��}t||�}t|||�|Sr)r�getattrr)r
�attr�_module�valuerrr�__getattr__us
zMovedModule.__getattr__)N)r
rrrrr2�
__classcell__rrr-rr'gs	r'cs(eZdZ�fdd�Zdd�ZgZ�ZS)�_LazyModulecstt|��|�|jj|_dSr)r(r4rr!rrr-rrr~sz_LazyModule.__init__cCs ddg}|dd�|jD�7}|S)Nrr
cSsg|]
}|j�qSrr)�.0r/rrr�
<listcomp>�sz'_LazyModule.__dir__.<locals>.<listcomp>)�_moved_attributes)r
Zattrsrrr�__dir__�sz_LazyModule.__dir__)r
rrrr8r7r3rrr-rr4|sr4cs&eZdZd�fdd�	Zdd�Z�ZS)�MovedAttributeNcsdtt|��|�trH|dkr |}||_|dkr@|dkr<|}n|}||_n||_|dkrZ|}||_dSr)r(r9rr)r*r/)r
rZold_modZnew_modZold_attrZnew_attrr-rrr�szMovedAttribute.__init__cCst|j�}t||j�Sr)rr*r.r/)r
�modulerrrr�s
zMovedAttribute._resolve)NN)r
rrrrr3rrr-rr9�sr9c@sVeZdZdZdd�Zdd�Zdd�Zdd	d
�Zdd�Zd
d�Z	dd�Z
dd�ZeZdS)�_SixMetaPathImporterz�
    A meta path importer to import six.moves and its submodules.

    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    cCs||_i|_dSr)r�
known_modules)r
Zsix_module_namerrrr�sz_SixMetaPathImporter.__init__cGs"|D]}||j|jd|<qdS�N�.�r<r)r
r*Z	fullnames�fullnamerrr�_add_module�sz _SixMetaPathImporter._add_modulecCs|j|jd|Sr=r?�r
r@rrr�_get_module�sz _SixMetaPathImporter._get_moduleNcCs||jkr|SdSr)r<)r
r@�pathrrr�find_module�s
z _SixMetaPathImporter.find_modulecCs2z|j|WStk
r,td|��YnXdS)Nz!This loader does not know module )r<�KeyError�ImportErrorrBrrrZ__get_module�sz!_SixMetaPathImporter.__get_modulecCsTztj|WStk
r YnX|�|�}t|t�r@|��}n||_|tj|<|Sr)rrrF� _SixMetaPathImporter__get_module�
isinstancer'r�
__loader__)r
r@r*rrr�load_module�s



z _SixMetaPathImporter.load_modulecCst|�|�d�S)z�
        Return true, if the named module is a package.

        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        �__path__)�hasattrrHrBrrr�
is_package�sz_SixMetaPathImporter.is_packagecCs|�|�dS)z;Return None

        Required, if is_package is implementedN)rHrBrrr�get_code�s
z_SixMetaPathImporter.get_code)N)
r
rrrrrArCrErHrKrNrO�
get_sourcerrrrr;�s
	r;c@seZdZdZgZdS)�_MovedItemszLazy loading of moved objectsN)r
rrrrLrrrrrQ�srQZ	cStringIO�io�StringIO�filter�	itertools�builtinsZifilter�filterfalseZifilterfalse�inputZ__builtin__Z	raw_input�internr�map�imap�getcwd�osZgetcwdu�getcwdb�rangeZxrangeZ
reload_module�	importlibZimp�reload�reduce�	functoolsZshlex_quoteZpipesZshlexZquote�UserDict�collections�UserList�
UserString�zipZizip�zip_longestZizip_longestZconfigparserZConfigParser�copyregZcopy_regZdbm_gnuZgdbmzdbm.gnuZ
_dummy_threadZdummy_threadZhttp_cookiejarZ	cookielibzhttp.cookiejarZhttp_cookiesZCookiezhttp.cookiesZ
html_entitiesZhtmlentitydefsz
html.entitiesZhtml_parserZ
HTMLParserzhtml.parserZhttp_clientZhttplibzhttp.clientZemail_mime_multipartzemail.MIMEMultipartzemail.mime.multipartZemail_mime_nonmultipartzemail.MIMENonMultipartzemail.mime.nonmultipartZemail_mime_textzemail.MIMETextzemail.mime.textZemail_mime_basezemail.MIMEBasezemail.mime.baseZBaseHTTPServerzhttp.serverZ
CGIHTTPServerZSimpleHTTPServerZcPickle�pickleZqueueZQueue�reprlib�reprZsocketserverZSocketServer�_thread�threadZtkinterZTkinterZtkinter_dialogZDialogztkinter.dialogZtkinter_filedialogZ
FileDialogztkinter.filedialogZtkinter_scrolledtextZScrolledTextztkinter.scrolledtextZtkinter_simpledialogZSimpleDialogztkinter.simpledialogZtkinter_tixZTixztkinter.tixZtkinter_ttkZttkztkinter.ttkZtkinter_constantsZTkconstantsztkinter.constantsZtkinter_dndZTkdndztkinter.dndZtkinter_colorchooserZtkColorChooserztkinter.colorchooserZtkinter_commondialogZtkCommonDialogztkinter.commondialogZtkinter_tkfiledialogZtkFileDialogZtkinter_fontZtkFontztkinter.fontZtkinter_messageboxZtkMessageBoxztkinter.messageboxZtkinter_tksimpledialogZtkSimpleDialogZurllib_parsez.moves.urllib_parsezurllib.parseZurllib_errorz.moves.urllib_errorzurllib.errorZurllibz
.moves.urllibZurllib_robotparser�robotparserzurllib.robotparserZ
xmlrpc_clientZ	xmlrpclibz
xmlrpc.clientZ
xmlrpc_serverZSimpleXMLRPCServerz
xmlrpc.serverZwin32�winreg�_winregzmoves.z.moves�movesc@seZdZdZdS)�Module_six_moves_urllib_parsez7Lazy loading of moved objects in six.moves.urllib_parseN�r
rrrrrrrrt@srtZParseResultZurlparseZSplitResultZparse_qsZ	parse_qslZ	urldefragZurljoinZurlsplitZ
urlunparseZ
urlunsplitZ
quote_plusZunquoteZunquote_plusZ	urlencodeZ
splitqueryZsplittagZ	splituserZ
uses_fragmentZuses_netlocZuses_paramsZ
uses_queryZ
uses_relative�moves.urllib_parsezmoves.urllib.parsec@seZdZdZdS)�Module_six_moves_urllib_errorz7Lazy loading of moved objects in six.moves.urllib_errorNrurrrrrwhsrwZURLErrorZurllib2Z	HTTPErrorZContentTooShortErrorz.moves.urllib.error�moves.urllib_errorzmoves.urllib.errorc@seZdZdZdS)�Module_six_moves_urllib_requestz9Lazy loading of moved objects in six.moves.urllib_requestNrurrrrry|sryZurlopenzurllib.requestZinstall_openerZbuild_openerZpathname2urlZurl2pathnameZ
getproxiesZRequestZOpenerDirectorZHTTPDefaultErrorHandlerZHTTPRedirectHandlerZHTTPCookieProcessorZProxyHandlerZBaseHandlerZHTTPPasswordMgrZHTTPPasswordMgrWithDefaultRealmZAbstractBasicAuthHandlerZHTTPBasicAuthHandlerZProxyBasicAuthHandlerZAbstractDigestAuthHandlerZHTTPDigestAuthHandlerZProxyDigestAuthHandlerZHTTPHandlerZHTTPSHandlerZFileHandlerZ
FTPHandlerZCacheFTPHandlerZUnknownHandlerZHTTPErrorProcessorZurlretrieveZ
urlcleanupZ	URLopenerZFancyURLopenerZproxy_bypassz.moves.urllib.request�moves.urllib_requestzmoves.urllib.requestc@seZdZdZdS)� Module_six_moves_urllib_responsez:Lazy loading of moved objects in six.moves.urllib_responseNrurrrrr{�sr{Zaddbasezurllib.responseZaddclosehookZaddinfoZ
addinfourlz.moves.urllib.response�moves.urllib_responsezmoves.urllib.responsec@seZdZdZdS)�#Module_six_moves_urllib_robotparserz=Lazy loading of moved objects in six.moves.urllib_robotparserNrurrrrr}�sr}ZRobotFileParserz.moves.urllib.robotparser�moves.urllib_robotparserzmoves.urllib.robotparserc@sNeZdZdZgZe�d�Ze�d�Ze�d�Z	e�d�Z
e�d�Zdd�Zd	S)
�Module_six_moves_urllibzICreate a six.moves.urllib namespace that resembles the Python 3 namespacervrxrzr|r~cCsdddddgS)N�parse�error�request�responserprr	rrrr8�szModule_six_moves_urllib.__dir__N)
r
rrrrL�	_importerrCr�r�r�r�rpr8rrrrr�s




rzmoves.urllibcCstt|j|�dS)zAdd an item to six.moves.N)rrQr)Zmoverrr�add_move�sr�cCsXztt|�WnDtk
rRztj|=Wn"tk
rLtd|f��YnXYnXdS)zRemove item from six.moves.zno such move, %rN)r rQr"rs�__dict__rFrrrr�remove_move�sr��__func__�__self__�__closure__�__code__�__defaults__�__globals__�im_funcZim_selfZfunc_closureZ	func_codeZ
func_defaultsZfunc_globalscCs|��Sr)�next)�itrrr�advance_iteratorsr�cCstdd�t|�jD��S)Ncss|]}d|jkVqdS)�__call__N)r�)r5�klassrrr�	<genexpr>szcallable.<locals>.<genexpr>)�any�type�__mro__)r#rrr�callablesr�cCs|Srr�Zunboundrrr�get_unbound_functionsr�cCs|Srr�r�clsrrr�create_unbound_methodsr�cCs|jSr)r�r�rrrr�"scCst�|||j�Sr)�types�
MethodTyper!)rr#rrr�create_bound_method%sr�cCst�|d|�Sr)r�r�r�rrrr�(sc@seZdZdd�ZdS)�IteratorcCst|��|�Sr)r��__next__r	rrrr�-sz
Iterator.nextN)r
rrr�rrrrr�+sr�z3Get the function out of a possibly unbound functioncKst|jf|��Sr)�iter�keys��d�kwrrr�iterkeys>sr�cKst|jf|��Sr)r��valuesr�rrr�
itervaluesAsr�cKst|jf|��Sr)r��itemsr�rrr�	iteritemsDsr�cKst|jf|��Sr)r�Zlistsr�rrr�	iterlistsGsr�r�r�r�cKs|jf|�Sr)r�r�rrrr�PscKs|jf|�Sr)r�r�rrrr�SscKs|jf|�Sr)r�r�rrrr�VscKs|jf|�Sr)r�r�rrrr�Ys�viewkeys�
viewvalues�	viewitemsz1Return an iterator over the keys of a dictionary.z3Return an iterator over the values of a dictionary.z?Return an iterator over the (key, value) pairs of a dictionary.zBReturn an iterator over the (key, [values]) pairs of a dictionary.cCs
|�d�S)Nzlatin-1)�encode��srrr�bksr�cCs|Srrr�rrr�unsr�z>B�assertCountEqual�ZassertRaisesRegexpZassertRegexpMatches�assertRaisesRegex�assertRegexcCs|Srrr�rrrr��scCst|�dd�d�S)Nz\\z\\\\Zunicode_escape)�unicode�replacer�rrrr��scCst|d�S)Nr��ord)Zbsrrr�byte2int�sr�cCst||�Srr�)Zbuf�irrr�
indexbytes�sr�ZassertItemsEqualzByte literalzText literalcOst|t�||�Sr)r.�_assertCountEqual�r
�args�kwargsrrrr��scOst|t�||�Sr)r.�_assertRaisesRegexr�rrrr��scOst|t�||�Sr)r.�_assertRegexr�rrrr��s�execcCs*|dkr|�}|j|k	r"|�|��|�dSr)�
__traceback__�with_traceback)r$r1�tbrrr�reraise�s


r�cCsB|dkr*t�d�}|j}|dkr&|j}~n|dkr6|}td�dS)zExecute code in a namespace.Nr�zexec _code_ in _globs_, _locs_)r�	_getframe�	f_globals�f_localsr�)Z_code_Z_globs_Z_locs_�framerrr�exec_�s
r�z9def reraise(tp, value, tb=None):
    raise tp, value, tb
)rrzrdef raise_from(value, from_value):
    if from_value is None:
        raise value
    raise value from from_value
zCdef raise_from(value, from_value):
    raise value from from_value
cCs|�dSrr)r1Z
from_valuerrr�
raise_from�sr��printc
s.|�dtj���dkrdS�fdd�}d}|�dd�}|dk	r`t|t�rNd}nt|t�s`td��|�d	d�}|dk	r�t|t�r�d}nt|t�s�td
��|r�td��|s�|D]}t|t�r�d}q�q�|r�td�}td
�}nd}d
}|dkr�|}|dkr�|}t|�D] \}	}|	�r||�||��q||�dS)z4The new-style print function for Python 2.4 and 2.5.�fileNcsdt|t�st|�}t�t�rVt|t�rV�jdk	rVt�dd�}|dkrHd}|��j|�}��|�dS)N�errors�strict)	rI�
basestring�strr�r��encodingr.r��write)�datar���fprrr��s

��zprint_.<locals>.writeF�sepTzsep must be None or a string�endzend must be None or a stringz$invalid keyword arguments to print()�
� )�popr�stdoutrIr�r��	TypeError�	enumerate)
r�r�r�Zwant_unicoder�r��arg�newlineZspacer�rr�r�print_�sL





r�)rrcOs<|�dtj�}|�dd�}t||�|r8|dk	r8|��dS)Nr��flushF)�getrr�r��_printr�)r�r�r�r�rrrr�s

zReraise an exception.cs���fdd�}|S)Ncst�����|�}�|_|Sr)rc�wraps�__wrapped__)�f��assigned�updated�wrappedrr�wrapperszwraps.<locals>.wrapperr)r�r�r�r�rr�rr�sr�cs&G��fdd�d��}t�|ddi�S)z%Create a base class with a metaclass.cseZdZ��fdd�ZdS)z!with_metaclass.<locals>.metaclasscs�|�|�Srr)r�rZ
this_basesr���bases�metarr�__new__'sz)with_metaclass.<locals>.metaclass.__new__N)r
rrr�rr�rr�	metaclass%sr�Ztemporary_classr)r�r�)r�r�r�rr�r�with_metaclass sr�cs�fdd�}|S)z6Class decorator for creating a class with a metaclass.csh|j��}|�d�}|dk	r@t|t�r,|g}|D]}|�|�q0|�dd�|�dd��|j|j|�S)N�	__slots__r��__weakref__)r��copyr�rIr�r�r
�	__bases__)r�Z	orig_vars�slotsZ	slots_var�r�rrr�.s


zadd_metaclass.<locals>.wrapperr)r�r�rr�r�
add_metaclass,sr�cCs2tr.d|jkrtd|j��|j|_dd�|_|S)a
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    �__str__zY@python_2_unicode_compatible cannot be applied to %s because it doesn't define __str__().cSs|���d�S)Nzutf-8)�__unicode__r�r	rrr�<lambda>J�z-python_2_unicode_compatible.<locals>.<lambda>)�PY2r��
ValueErrorr
rr)r�rrr�python_2_unicode_compatible<s
�
r�__spec__)N)NN)�rZ
__future__rrcrU�operatorrr��
__author__�__version__�version_inforr)ZPY34r�Zstring_types�intZ
integer_typesr�Zclass_typesZ	text_type�bytesZbinary_type�maxsizeZMAXSIZEr�ZlongZ	ClassTyper��platform�
startswith�objectr�len�
OverflowErrorrrrr'�
ModuleTyper4r9r;r
r�rQr7r/rrrIrArsrtZ_urllib_parse_moved_attributesrwZ_urllib_error_moved_attributesryZ _urllib_request_moved_attributesr{Z!_urllib_response_moved_attributesr}Z$_urllib_robotparser_moved_attributesrr�r�Z
_meth_funcZ
_meth_selfZ
_func_closureZ
_func_codeZ_func_defaultsZ
_func_globalsr�r��	NameErrorr�r�r�r�r�r��
attrgetterZget_method_functionZget_method_selfZget_function_closureZget_function_codeZget_function_defaultsZget_function_globalsr�r�r�r��methodcallerr�r�r�r�r��chrZunichr�struct�Struct�packZint2byte�
itemgetterr��getitemr�r�Z	iterbytesrRrS�BytesIOr�r�r��partialr[r�r�r�r�r.rVr�r�r�r�r��WRAPPER_ASSIGNMENTS�WRAPPER_UPDATESr�r�r�rrL�__package__�globalsr�r�submodule_search_locations�	meta_pathr�r�Zimporter�appendrrrr�<module>s�

>





























��


�


�D�






















��


��
































�#�����
��





��



5��
PK�V[n�a`��2_vendor/__pycache__/pyparsing.cpython-38.opt-1.pycnu�[���U

�Qabw��i@s�dZdZdZdZddlZddlmZddlZddl	Z	ddl
Z
ddlZddlZddl
Z
ddlZddlZddlZddlmZzddlmZWn ek
r�ddlmZYnXzdd	lmZdd
lmZWn,ek
r�dd	l
mZdd
l
mZYnXzddl
mZWnBek
�rFzddlmZWnek
�r@dZYnXYnXdd
ddddddddddddddddddd d!d"d#d$d%d&d'd(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDdEdFdGdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcdddedfdgdhdidjdkdldmdndodpdqdrdsdtgiZee	j�ddu�ZeddukZ e �rpe	j!Z"e#Z$e%Z&e#Z'e(e)e*e+e,ee-e.e/e0e1gZ2n`e	j3Z"e4Z5dvdw�Z'gZ2ddl6Z6dx�7�D]8Z8ze2�9e:e6e8��Wne;k
�r�Y�q�YnX�q�e<dydz�e5d{�D��Z=d|d}�Z>Gd~d�de?�Z@ejAejBZCd�ZDeDd�ZEeCeDZFe%d��ZGd��Hd�dz�ejID��ZJGd�d#�d#eK�ZLGd�d%�d%eL�ZMGd�d'�d'eL�ZNGd�d)�d)eN�ZOGd�d,�d,eK�ZPGd�d��d�e?�ZQGd�d(�d(e?�ZRe�SeR�d�d?�ZTd�dP�ZUd�dM�ZVd�d��ZWd�d��ZXd�d��ZYd�dW�ZZ�d/d�d��Z[Gd�d*�d*e?�Z\Gd�d2�d2e\�Z]Gd�d�de]�Z^Gd�d�de]�Z_Gd�d�de]�Z`e`Zae`e\_bGd�d�de]�ZcGd�d�de`�ZdGd�d
�d
ec�ZeGd�dr�dre]�ZfGd�d5�d5e]�ZgGd�d-�d-e]�ZhGd�d+�d+e]�ZiGd�d�de]�ZjGd�d4�d4e]�ZkGd�d��d�e]�ZlGd�d�del�ZmGd�d�del�ZnGd�d�del�ZoGd�d0�d0el�ZpGd�d/�d/el�ZqGd�d7�d7el�ZrGd�d6�d6el�ZsGd�d&�d&e\�ZtGd�d�det�ZuGd�d"�d"et�ZvGd�d�det�ZwGd�d�det�ZxGd�d$�d$e\�ZyGd�d�dey�ZzGd�d�dey�Z{Gd�d��d�ey�Z|Gd�d�de|�Z}Gd�d8�d8e|�Z~Gd�d��d�e?�Ze�Z�Gd�d!�d!ey�Z�Gd�d.�d.ey�Z�Gd�d�dey�Z�Gd�dÄd�e��Z�Gd�d3�d3ey�Z�Gd�d�de��Z�Gd�d�de��Z�Gd�d�de��Z�Gd�d1�d1e��Z�Gd�d �d e?�Z�d�dh�Z��d0d�dF�Z��d1d�dB�Z�d�dЄZ�d�dU�Z�d�dT�Z�d�dԄZ��d2d�dY�Z�d�dG�Z��d3d�dm�Z�d�dn�Z�d�dp�Z�e^���dI�Z�en���dO�Z�eo���dN�Z�ep���dg�Z�eq���df�Z�egeGd�d�d܍��d�dބ�Z�ehd߃��d�dބ�Z�ehd���d�dބ�Z�e�e�Be�Bejd�d{d܍BZ�e�e�e�d�e��Z�e`d�e�d���d�e�e}e�e�B����d�d�Z�d�de�Z�d�dS�Z�d�db�Z�d�d`�Z�d�ds�Z�e�d�dބ�Z�e�d�dބ�Z�d�d�Z�d�dQ�Z�d�dR�Z�d�dk�Z�e?�e�_��d4d�dq�Z�e@�Z�e?�e�_�e?�e�_�e�d��e�d��fd�do�Z�e�Z�e�ehd��d����d��Z�e�ehd��d����d��Z�e�ehd��d�ehd��d�B����d�Z�e�ea�d�e�������d�Z�d�d�de���f�ddV�Z��d5�ddl�Z�e��d�Z�e��d�Z�e�egeCeF�d����d��\Z�Z�e�ed	�7��d
��Z�eh�d�d�Heàġ��d
����d�ZŐdda�Z�e�eh�d��d����d�Z�eh�d����d�Z�eh�d��ɡ���d�Z�eh�d����d�Z�e�eh�d��de�B����d�Z�e�Z�eh�d����d�Z�e�e}egeJdːd�e�eg�d�e`d˃eo�����ϡ���d�Z�e�e�e���e�Bd��d����d@�Z�G�d dt�dt�Z�eӐd!k�r�ed�d"�Z�ed�d#�Z�egeCeF�d$�Z�e�e֐d%dՐd&���e��Z�e�e�e׃����d'�Zؐd(e�BZ�e�e֐d%dՐd&���e��Z�e�e�eڃ����d)�Z�eԐd*�eِd'�e�eېd)�Z�eܠݐd+�e�jޠݐd,�e�jߠݐd,�e�j�ݐd-�ddl�Z�e�j᠝e�e�j��e�j�ݐd.�dS(6a�	
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================

The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.

Here is a program to parse "Hello, World!" (or any greeting of the form 
C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements 
(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::

    from pyparsing import Word, alphas

    # define grammar of a greeting
    greet = Word(alphas) + "," + Word(alphas) + "!"

    hello = "Hello, World!"
    print (hello, "->", greet.parseString(hello))

The program outputs the following::

    Hello, World! -> ['Hello', ',', 'World', '!']

The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.

The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
object with named attributes.

The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
 - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)
 - quoted strings
 - embedded comments


Getting Started -
-----------------
Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
 - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
 - construct character word-group expressions using the L{Word} class
 - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
 - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
 - associate names with your parsed results using L{ParserElement.setResultsName}
 - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
 - find more useful common expressions in the L{pyparsing_common} namespace class
z2.2.1z18 Sep 2018 00:49 UTCz*Paul McGuire <ptmcg@users.sourceforge.net>�N)�ref)�datetime)�RLock)�Iterable)�MutableMapping)�OrderedDict�And�CaselessKeyword�CaselessLiteral�
CharsNotIn�Combine�Dict�Each�Empty�
FollowedBy�Forward�
GoToColumn�Group�Keyword�LineEnd�	LineStart�Literal�
MatchFirst�NoMatch�NotAny�	OneOrMore�OnlyOnce�Optional�Or�ParseBaseException�ParseElementEnhance�ParseException�ParseExpression�ParseFatalException�ParseResults�ParseSyntaxException�
ParserElement�QuotedString�RecursiveGrammarException�Regex�SkipTo�	StringEnd�StringStart�Suppress�Token�TokenConverter�White�Word�WordEnd�	WordStart�
ZeroOrMore�	alphanums�alphas�
alphas8bit�anyCloseTag�
anyOpenTag�
cStyleComment�col�commaSeparatedList�commonHTMLEntity�countedArray�cppStyleComment�dblQuotedString�dblSlashComment�
delimitedList�dictOf�downcaseTokens�empty�hexnums�htmlComment�javaStyleComment�line�lineEnd�	lineStart�lineno�makeHTMLTags�makeXMLTags�matchOnlyAtCol�matchPreviousExpr�matchPreviousLiteral�
nestedExpr�nullDebugAction�nums�oneOf�opAssoc�operatorPrecedence�
printables�punc8bit�pythonStyleComment�quotedString�removeQuotes�replaceHTMLEntity�replaceWith�
restOfLine�sglQuotedString�srange�	stringEnd�stringStart�traceParseAction�
unicodeString�upcaseTokens�
withAttribute�
indentedBlock�originalTextFor�ungroup�
infixNotation�locatedExpr�	withClass�
CloseMatch�tokenMap�pyparsing_common�cCsft|t�r|Sz
t|�WStk
r`t|��t��d�}td�}|�dd��|�	|�YSXdS)aDrop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
           then < returns the unicode object | encodes it with the default encoding | ... >.
        �xmlcharrefreplacez&#\d+;cSs$dtt|ddd���dd�S)Nz\ur����)�hex�int��t�ry�C/usr/lib/python3.8/site-packages/pkg_resources/_vendor/pyparsing.py�<lambda>��z_ustr.<locals>.<lambda>N)
�
isinstanceZunicode�str�UnicodeEncodeError�encode�sys�getdefaultencodingr)�setParseAction�transformString)�obj�retZ
xmlcharrefryryrz�_ustr�s

r�z6sum len sorted reversed list tuple set any all min maxccs|]
}|VqdS�Nry)�.0�yryryrz�	<genexpr>�sr��cCs:d}dd�d��D�}t||�D]\}}|�||�}q |S)z/Escape &, <, >, ", ', etc. in a string of data.z&><"'css|]}d|dVqdS)�&�;Nry)r��sryryrzr��sz_xml_escape.<locals>.<genexpr>zamp gt lt quot apos)�split�zip�replace)�dataZfrom_symbolsZ
to_symbolsZfrom_Zto_ryryrz�_xml_escape�s
r�c@seZdZdS)�
_ConstantsN)�__name__�
__module__�__qualname__ryryryrzr��sr��
0123456789ZABCDEFabcdef�\�ccs|]}|tjkr|VqdSr�)�stringZ
whitespace�r��cryryrzr��s
c@sPeZdZdZddd�Zedd��Zdd	�Zd
d�Zdd
�Z	ddd�Z
dd�ZdS)rz7base exception class for all parsing runtime exceptionsrNcCs>||_|dkr||_d|_n||_||_||_|||f|_dS�Nr�)�loc�msg�pstr�
parserElement�args)�selfr�r�r��elemryryrz�__init__�szParseBaseException.__init__cCs||j|j|j|j�S)z�
        internal factory method to simplify creating one type of ParseException 
        from another - avoids having __init__ signature conflicts among subclasses
        )r�r�r�r�)�cls�peryryrz�_from_exception�sz"ParseBaseException._from_exceptioncCsN|dkrt|j|j�S|dkr,t|j|j�S|dkrBt|j|j�St|��dS)z�supported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        rL)r;�columnrIN)rLr�r�r;rI�AttributeError)r�Zanameryryrz�__getattr__�szParseBaseException.__getattr__cCsd|j|j|j|jfS)Nz"%s (at char %d), (line:%d, col:%d))r�r�rLr��r�ryryrz�__str__�s�zParseBaseException.__str__cCst|�Sr��r�r�ryryrz�__repr__�szParseBaseException.__repr__�>!<cCs<|j}|jd}|r4d�|d|�|||d�f�}|��S)z�Extracts the exception line from the input string, and marks
           the location of the exception with a special symbol.
        r�r�N)rIr��join�strip)r�ZmarkerStringZline_strZline_columnryryrz�
markInputline�s

�z ParseBaseException.markInputlinecCsd��tt|��S)Nzlineno col line)r��dir�typer�ryryrz�__dir__szParseBaseException.__dir__)rNN)r�)r�r�r��__doc__r��classmethodr�r�r�r�r�r�ryryryrzr�s



c@seZdZdZdS)r!aN
    Exception thrown when parse expressions don't match class;
    supported attributes by name are:
     - lineno - returns the line number of the exception text
     - col - returns the column number of the exception text
     - line - returns the line containing the exception text
        
    Example::
        try:
            Word(nums).setName("integer").parseString("ABC")
        except ParseException as pe:
            print(pe)
            print("column: {}".format(pe.col))
            
    prints::
       Expected integer (at char 0), (line:1, col:1)
        column: 1
    N�r�r�r�r�ryryryrzr!sc@seZdZdZdS)r#znuser-throwable exception thrown when inconsistent parse content
       is found; stops all parsing immediatelyNr�ryryryrzr#sc@seZdZdZdS)r%z�just like L{ParseFatalException}, but thrown internally when an
       L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop 
       immediately because an unbacktrackable syntax error has been foundNr�ryryryrzr%sc@s eZdZdZdd�Zdd�ZdS)r(zZexception thrown by L{ParserElement.validate} if the grammar could be improperly recursivecCs
||_dSr��ZparseElementTrace�r��parseElementListryryrzr�4sz"RecursiveGrammarException.__init__cCs
d|jS)NzRecursiveGrammarException: %sr�r�ryryrzr�7sz!RecursiveGrammarException.__str__N)r�r�r�r�r�r�ryryryrzr(2sc@s,eZdZdd�Zdd�Zdd�Zdd�Zd	S)
�_ParseResultsWithOffsetcCs||f|_dSr���tup)r�Zp1Zp2ryryrzr�;sz _ParseResultsWithOffset.__init__cCs
|j|Sr�r��r��iryryrz�__getitem__=sz#_ParseResultsWithOffset.__getitem__cCst|jd�S�Nr)�reprr�r�ryryrzr�?sz _ParseResultsWithOffset.__repr__cCs|jd|f|_dSr�r�r�ryryrz�	setOffsetAsz!_ParseResultsWithOffset.setOffsetN)r�r�r�r�r�r�r�ryryryrzr�:sr�c@s�eZdZdZd[dd�Zddddefdd�Zdd	�Zefd
d�Zdd
�Z	dd�Z
dd�Zdd�ZeZ
dd�Zdd�Zdd�Zdd�Zdd�Zer�eZeZeZn$eZeZeZdd�Zd d!�Zd"d#�Zd$d%�Zd&d'�Zd\d(d)�Zd*d+�Zd,d-�Zd.d/�Zd0d1�Z d2d3�Z!d4d5�Z"d6d7�Z#d8d9�Z$d:d;�Z%d<d=�Z&d]d?d@�Z'dAdB�Z(dCdD�Z)dEdF�Z*d^dHdI�Z+dJdK�Z,dLdM�Z-d_dOdP�Z.dQdR�Z/dSdT�Z0dUdV�Z1dWdX�Z2dYdZ�Z3dS)`r$aI
    Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (C{len(results)})
       - by list index (C{results[0], results[1]}, etc.)
       - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})

    Example::
        integer = Word(nums)
        date_str = (integer.setResultsName("year") + '/' 
                        + integer.setResultsName("month") + '/' 
                        + integer.setResultsName("day"))
        # equivalent form:
        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

        # parseString returns a ParseResults object
        result = date_str.parseString("1999/12/31")

        def test(s, fn=repr):
            print("%s -> %s" % (s, fn(eval(s))))
        test("list(result)")
        test("result[0]")
        test("result['month']")
        test("result.day")
        test("'month' in result")
        test("'minutes' in result")
        test("result.dump()", str)
    prints::
        list(result) -> ['1999', '/', '12', '/', '31']
        result[0] -> '1999'
        result['month'] -> '12'
        result.day -> '31'
        'month' in result -> True
        'minutes' in result -> False
        result.dump() -> ['1999', '/', '12', '/', '31']
        - day: 31
        - month: 12
        - year: 1999
    NTcCs"t||�r|St�|�}d|_|S�NT)r}�object�__new__�_ParseResults__doinit)r��toklist�name�asList�modalZretobjryryrzr�ks


zParseResults.__new__c
Csb|jrvd|_d|_d|_i|_||_||_|dkr6g}||t�rP|dd�|_n||t�rft|�|_n|g|_t	�|_
|dk	�r^|�r^|s�d|j|<||t�r�t|�}||_||t
d�ttf�r�|ddgfk�s^||t�r�|g}|�r(||t��rt|��d�||<ntt|d�d�||<|||_n6z|d||<Wn$tttfk
�r\|||<YnXdS)NFrr�)r��_ParseResults__name�_ParseResults__parent�_ParseResults__accumNames�_ParseResults__asList�_ParseResults__modal�list�_ParseResults__toklist�_generatorType�dict�_ParseResults__tokdictrvr�r��
basestringr$r��copy�KeyError�	TypeError�
IndexError)r�r�r�r�r�r}ryryrzr�tsB



$
zParseResults.__init__cCsPt|ttf�r|j|S||jkr4|j|ddStdd�|j|D��SdS)NrtrcSsg|]}|d�qS�rry�r��vryryrz�
<listcomp>�sz,ParseResults.__getitem__.<locals>.<listcomp>)r}rv�slicer�r�r�r$r�ryryrzr��s


zParseResults.__getitem__cCs�||t�r0|j�|t��|g|j|<|d}nD||ttf�rN||j|<|}n&|j�|t��t|d�g|j|<|}||t�r�t|�|_	dSr�)
r�r��getr�rvr�r�r$�wkrefr�)r��kr�r}�subryryrz�__setitem__�s


"
zParseResults.__setitem__c
Cs�t|ttf�r�t|j�}|j|=t|t�rH|dkr:||7}t||d�}tt|�|���}|��|j	�
�D]>\}}|D]0}t|�D]"\}\}}	t||	|	|k�||<q�qxqln|j	|=dS�Nrr�)
r}rvr��lenr�r��range�indices�reverser��items�	enumerater�)
r�r�ZmylenZremovedr��occurrences�jr��value�positionryryrz�__delitem__�s

zParseResults.__delitem__cCs
||jkSr�)r�)r�r�ryryrz�__contains__�szParseResults.__contains__cCs
t|j�Sr�)r�r�r�ryryrz�__len__�r|zParseResults.__len__cCs
|jSr��r�r�ryryrz�__bool__�r|zParseResults.__bool__cCs
t|j�Sr���iterr�r�ryryrz�__iter__�r|zParseResults.__iter__cCst|jddd��S�Nrtr�r�ryryrz�__reversed__�r|zParseResults.__reversed__cCs$t|jd�r|j��St|j�SdS)N�iterkeys)�hasattrr�r�r�r�ryryrz�	_iterkeys�s
zParseResults._iterkeyscs�fdd����D�S)Nc3s|]}�|VqdSr�ry�r�r�r�ryrzr��sz+ParseResults._itervalues.<locals>.<genexpr>�rr�ryr�rz�_itervalues�szParseResults._itervaluescs�fdd����D�S)Nc3s|]}|�|fVqdSr�ryrr�ryrzr��sz*ParseResults._iteritems.<locals>.<genexpr>rr�ryr�rz�
_iteritems�szParseResults._iteritemscCst|���S)zVReturns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).)r�r�r�ryryrz�keys�szParseResults.keyscCst|���S)zXReturns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).)r��
itervaluesr�ryryrz�values�szParseResults.valuescCst|���S)zfReturns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).)r��	iteritemsr�ryryrzr��szParseResults.itemscCs
t|j�S)z�Since keys() returns an iterator, this method is helpful in bypassing
           code that looks for the existence of any defined results names.)�boolr�r�ryryrz�haskeys�szParseResults.haskeyscOs�|s
dg}|��D]*\}}|dkr0|d|f}qtd|��qt|dt�sdt|�dksd|d|kr~|d}||}||=|S|d}|SdS)a�
        Removes and returns item at specified index (default=C{last}).
        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
        argument or an integer argument, it will use C{list} semantics
        and pop tokens from the list of parsed tokens. If passed a 
        non-integer argument (most likely a string), it will use C{dict}
        semantics and pop the corresponding value from any defined 
        results names. A second default return value argument is 
        supported, just as in C{dict.pop()}.

        Example::
            def remove_first(tokens):
                tokens.pop(0)
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']

            label = Word(alphas)
            patt = label("LABEL") + OneOrMore(Word(nums))
            print(patt.parseString("AAB 123 321").dump())

            # Use pop() in a parse action to remove named result (note that corresponding value is not
            # removed from list form of results)
            def remove_LABEL(tokens):
                tokens.pop("LABEL")
                return tokens
            patt.addParseAction(remove_LABEL)
            print(patt.parseString("AAB 123 321").dump())
        prints::
            ['AAB', '123', '321']
            - LABEL: AAB

            ['AAB', '123', '321']
        rt�defaultrz-pop() got an unexpected keyword argument '%s'r�N)r�r�r}rvr�)r�r��kwargsr�r��indexr�Zdefaultvalueryryrz�pop�s""
�
�zParseResults.popcCs||kr||S|SdS)ai
        Returns named result matching the given key, or if there is no
        such name, then returns the given C{defaultValue} or C{None} if no
        C{defaultValue} is specified.

        Similar to C{dict.get()}.
        
        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            result = date_str.parseString("1999/12/31")
            print(result.get("year")) # -> '1999'
            print(result.get("hour", "not specified")) # -> 'not specified'
            print(result.get("hour")) # -> None
        Nry)r��key�defaultValueryryrzr�3szParseResults.getcCsR|j�||�|j��D]4\}}t|�D]"\}\}}t||||k�||<q(qdS)a
        Inserts new element at location index in the list of parsed tokens.
        
        Similar to C{list.insert()}.

        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']

            # use a parse action to insert the parse location in the front of the parsed results
            def insert_locn(locn, tokens):
                tokens.insert(0, locn)
            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
        N)r��insertr�r�r�r�)r�r
ZinsStrr�r�r�r�r�ryryrzrIszParseResults.insertcCs|j�|�dS)a�
        Add single element to end of ParseResults list of elements.

        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            
            # use a parse action to compute the sum of the parsed integers, and add it to the end
            def append_sum(tokens):
                tokens.append(sum(map(int, tokens)))
            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
        N)r��append)r��itemryryrzr]szParseResults.appendcCs$t|t�r||7}n|j�|�dS)a
        Add sequence of elements to end of ParseResults list of elements.

        Example::
            patt = OneOrMore(Word(alphas))
            
            # use a parse action to append the reverse of the matched strings, to make a palindrome
            def make_palindrome(tokens):
                tokens.extend(reversed([t[::-1] for t in tokens]))
                return ''.join(tokens)
            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
        N)r}r$r��extend)r�Zitemseqryryrzrks

zParseResults.extendcCs|jdd�=|j��dS)z7
        Clear all elements and results names.
        N)r�r��clearr�ryryrzr}szParseResults.clearcCsjz
||WStk
r YdSX||jkrb||jkrH|j|ddStdd�|j|D��SndSdS)Nr�rtrcSsg|]}|d�qSr�ryr�ryryrzr��sz,ParseResults.__getattr__.<locals>.<listcomp>)r�r�r�r$�r�r�ryryrzr��s


zParseResults.__getattr__cCs|��}||7}|Sr��r�)r��otherr�ryryrz�__add__�szParseResults.__add__cs�|jrjt|j���fdd��|j��}�fdd�|D�}|D],\}}|||<t|dt�r<t|�|d_q<|j|j7_|j�	|j�|S)Ncs|dkr�S|�Sr�ry)�a)�offsetryrzr{�r|z'ParseResults.__iadd__.<locals>.<lambda>c	s4g|],\}}|D]}|t|d�|d��f�qqS�rr�)r��r�r��vlistr�)�	addoffsetryrzr��s�z)ParseResults.__iadd__.<locals>.<listcomp>r)
r�r�r�r�r}r$r�r�r��update)r�rZ
otheritemsZotherdictitemsr�r�ry)rrrz�__iadd__�s


�zParseResults.__iadd__cCs&t|t�r|dkr|��S||SdSr�)r}rvr��r�rryryrz�__radd__�szParseResults.__radd__cCsdt|j�t|j�fS)Nz(%s, %s))r�r�r�r�ryryrzr��szParseResults.__repr__cCsdd�dd�|jD��dS)N�[�, css(|] }t|t�rt|�nt|�VqdSr�)r}r$r�r��r�r�ryryrzr��sz'ParseResults.__str__.<locals>.<genexpr>�])r�r�r�ryryrzr��szParseResults.__str__r�cCsLg}|jD]<}|r |r |�|�t|t�r8||��7}q
|�t|��q
|Sr�)r�rr}r$�
_asStringListr�)r��sep�outrryryrzr(�s


zParseResults._asStringListcCsdd�|jD�S)a�
        Returns the parse results as a nested list of matching tokens, all converted to strings.

        Example::
            patt = OneOrMore(Word(alphas))
            result = patt.parseString("sldkj lsdkj sldkj")
            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
            
            # Use asList() to create an actual list
            result_list = result.asList()
            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
        cSs"g|]}t|t�r|��n|�qSry)r}r$r�)r��resryryrzr��sz'ParseResults.asList.<locals>.<listcomp>r�r�ryryrzr��szParseResults.asListcs6tr|j}n|j}�fdd��t�fdd�|�D��S)a�
        Returns the named parse results as a nested dictionary.

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            
            result = date_str.parseString('12/31/1999')
            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
            
            result_dict = result.asDict()
            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}

            # even though a ParseResults supports dict-like access, sometime you just need to have a dict
            import json
            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
        cs6t|t�r.|��r|��S�fdd�|D�Sn|SdS)Ncsg|]}�|��qSryryr���toItemryrzr��sz7ParseResults.asDict.<locals>.toItem.<locals>.<listcomp>)r}r$r
�asDict)r�r,ryrzr-�s

z#ParseResults.asDict.<locals>.toItemc3s|]\}}|�|�fVqdSr�ry�r�r�r�r,ryrzr��sz&ParseResults.asDict.<locals>.<genexpr>)�PY_3r�rr�)r�Zitem_fnryr,rzr.�s
	zParseResults.asDictcCs8t|j�}|j��|_|j|_|j�|j�|j|_|S)zA
        Returns a new copy of a C{ParseResults} object.
        )r$r�r�r�r�r�r r��r�r�ryryrzr��s
zParseResults.copyFcCsLd}g}tdd�|j��D��}|d}|s8d}d}d}d}	|dk	rJ|}	n|jrV|j}	|	sf|rbdSd}	|||d|	d	g7}t|j�D]�\}
}t|t�r�|
|kr�||�||
|o�|dk||�g7}n||�d|o�|dk||�g7}q�d}|
|kr�||
}|�s|�rq�nd}t	t
|��}
|||d|d	|
d
|d	g	7}q�|||d
|	d	g7}d�|�S)z�
        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
        �
css(|] \}}|D]}|d|fVqqdS�r�Nryrryryrzr�s�z%ParseResults.asXML.<locals>.<genexpr>�  r�NZITEM�<�>�</)r�r�r�r�r�r�r}r$�asXMLr�r�r�)r�ZdoctagZnamedItemsOnly�indentZ	formatted�nlr*Z
namedItemsZnextLevelIndentZselfTagr�r+ZresTagZxmlBodyTextryryrzr8�s^

�

�
�zParseResults.asXMLcCs:|j��D]*\}}|D]\}}||kr|Sqq
dSr�)r�r�)r�r�r�rr�r�ryryrzZ__lookup;s
zParseResults.__lookupcCs�|jr|jS|jr.|��}|r(|�|�SdSnNt|�dkrxt|j�dkrxtt|j����dddkrxtt|j����SdSdS)a(
        Returns the results name for this token expression. Useful when several 
        different expressions might match at a particular location.

        Example::
            integer = Word(nums)
            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
            house_number_expr = Suppress('#') + Word(nums, alphanums)
            user_data = (Group(house_number_expr)("house_number") 
                        | Group(ssn_expr)("ssn")
                        | Group(integer)("age"))
            user_info = OneOrMore(user_data)
            
            result = user_info.parseString("22 111-22-3333 #221B")
            for item in result:
                print(item.getName(), ':', item[0])
        prints::
            age : 22
            ssn : 111-22-3333
            house_number : 221B
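# A runnable sketch of getName(), mirroring the example above and assuming the
# standalone `pyparsing` distribution is importable.
from pyparsing import Word, Group, OneOrMore, Regex, Suppress, nums, alphanums

integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
             | Group(ssn_expr)("ssn")
             | Group(integer)("age"))

for item in OneOrMore(user_data).parseString("22 111-22-3333 #221B"):
    print(item.getName(), ':', item[0])
# age : 22
# ssn : 111-22-3333
# house_number : 221B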
        Nr�r)rrt)	r�r��_ParseResults__lookupr�r��nextr�rr)r��parryryrz�getNameBs
��zParseResults.getNamercCsZg}d}|�|t|����|�rP|��r�tdd�|��D��}|D]r\}}|r\|�|�|�d|d||f�t|t�r�|r�|�|�||d��q�|�t|��qF|�t	|��qFn�t
dd�|D���rP|}t|�D]r\}	}
t|
t��r$|�d|d||	|d|d|
�||d�f�q�|�d|d||	|d|dt|
�f�q�d	�|�S)
aH
        Diagnostic method for listing out the contents of a C{ParseResults}.
        Accepts an optional C{indent} argument so that this string can be embedded
        in a nested display of other data.

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            
            result = date_str.parseString('12/31/1999')
            print(result.dump())
        prints::
            ['12', '/', '31', '/', '1999']
            - day: 1999
            - month: 31
            - year: 12
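# A runnable sketch of dump() as a quick diagnostic view of named results, assuming
# the standalone `pyparsing` distribution is importable.
from pyparsing import Word, nums

integer = Word(nums)
date_expr = integer("year") + '/' + integer("month") + '/' + integer("day")
print(date_expr.parseString("12/31/1999").dump())
# ['12', '/', '31', '/', '1999']
# - day: 1999
# - month: 31
# - year: 12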
        r2css|]\}}t|�|fVqdSr�)r~r/ryryrzr�~sz$ParseResults.dump.<locals>.<genexpr>z
%s%s- %s: r4r�css|]}t|t�VqdSr�)r}r$)r��vvryryrzr��sz
%s%s[%d]:
%s%s%sr�)
rr�r�r
�sortedr�r}r$�dumpr��anyr�r�)r�r9�depth�fullr*�NLr�r�r�r�r?ryryrzrAgs,

4,zParseResults.dumpcOstj|��f|�|�dS)a�
        Pretty-printer for parsed results as a list, using the C{pprint} module.
        Accepts additional positional or keyword args as defined for the 
        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})

        Example::
            ident = Word(alphas, alphanums)
            num = Word(nums)
            func = Forward()
            term = ident | num | Group('(' + func + ')')
            func <<= ident + Group(Optional(delimitedList(term)))
            result = func.parseString("fna a,b,(fnb c,d,200),100")
            result.pprint(width=40)
        prints::
            ['fna',
             ['a',
              'b',
              ['(', 'fnb', ['c', 'd', '200'], ')'],
              '100']]
        N)�pprintr��r�r�rryryrzrF�szParseResults.pprintcCs.|j|j��|jdk	r|��p d|j|jffSr�)r�r�r�r�r�r�r�ryryrz�__getstate__�s��zParseResults.__getstate__cCsN|d|_|d\|_}}|_i|_|j�|�|dk	rDt|�|_nd|_dSr�)r�r�r�r�r r�r�)r��stater=ZinAccumNamesryryrz�__setstate__�s
�zParseResults.__setstate__cCs|j|j|j|jfSr�)r�r�r�r�r�ryryrz�__getnewargs__�szParseResults.__getnewargs__cCstt|��t|���Sr�)r�r�r�rr�ryryrzr��szParseResults.__dir__)NNTT)N)r�)NFr�T)r�rT)4r�r�r�r�r�r}r�r�r�r�r�r�r��__nonzero__r�r�rrrr0rrr�r�rrr
rr�rrrrr�rr!r#r�r�r(r�r.r�r8r;r>rArFrHrJrKr�ryryryrzr$Dsh&
	'	
4

#
=%
-
def col (loc, strg):
    """Returns current column within a string, counting newlines as line separators.
   The first column is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    s = strg
    return 1 if 0 < loc < len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
def lineno(loc, strg):
    """Returns current line number within a string, counting newlines as line separators.
   The first line is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    return strg.count("\n", 0, loc) + 1
def line(loc, strg):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
       """
    lastCR = strg.rfind("\n", 0, loc)
    nextCR = strg.find("\n", loc)
    if nextCR >= 0:
        return strg[lastCR+1:nextCR]
    else:
        return strg[lastCR+1:]
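# A runnable sketch of the col()/lineno()/line() helpers above, assuming the standalone
# `pyparsing` distribution is importable (it exports the same module-level functions).
from pyparsing import col, lineno, line

text = "abc\ndef\nghi"
loc = text.index("e")      # character offset 5, inside the second line
print(lineno(loc, text))   # -> 2
print(col(loc, text))      # -> 2
print(line(loc, text))     # -> def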
cCs8tdt|�dt|�dt||�t||�f�dS)NzMatch z at loc z(%d,%d))�printr�rLr;)�instringr��exprryryrz�_defaultStartDebugAction�srTcCs$tdt|�dt|����dS)NzMatched z -> )rQr�r~r�)rR�startlocZendlocrS�toksryryrz�_defaultSuccessDebugAction�srWcCstdt|��dS)NzException raised:)rQr�)rRr�rS�excryryrz�_defaultExceptionDebugAction�srYcGsdS)zG'Do-nothing' debug action, to suppress debugging output during parsing.Nry)r�ryryrzrS�srscs��tkr�fdd�Sdg�dg�tdd�dkrFddd�}dd	d
��ntj}tj�d}|dd�d
}|d|d|f�������fdd�}d}zt�dt�d�j�}Wntk
r�t��}YnX||_|S)Ncs�|�Sr�ry�r��lrx)�funcryrzr{r|z_trim_arity.<locals>.<lambda>rFrs)rq�cSs8tdkrdnd}tj||dd�|}|dd�gS)N)rqr]r������r���limitrs)�system_version�	traceback�
extract_stack)rar�
frame_summaryryryrzrdsz"_trim_arity.<locals>.extract_stackcSs$tj||d�}|d}|dd�gS)Nr`rtrs)rc�
extract_tb)�tbraZframesreryryrzrfsz_trim_arity.<locals>.extract_tb�r`rtr�c	s�z"�|�dd��}d�d<|WStk
r��dr>�n4z.t��d}�|dd�ddd��ksj�W5~X�d�kr��dd7<Yq�YqXqdS)NrTrtrsr`r�)r�r��exc_info)r�r�rg�rfZ
foundArityr\ra�maxargsZpa_call_line_synthryrz�wrapper-s z_trim_arity.<locals>.wrapperz<parse action>r��	__class__)r)r)	�singleArgBuiltinsrbrcrdrf�getattrr��	Exceptionr~)r\rkrdZ	LINE_DIFFZ	this_linerl�	func_nameryrjrz�_trim_aritys,

�rrcs�eZdZdZdZdZedd��Zedd��Zd�dd	�Z	d
d�Z
dd
�Zd�dd�Zd�dd�Z
dd�Zdd�Zdd�Zdd�Zdd�Zdd�Zd�dd �Zd!d"�Zd�d#d$�Zd%d&�Zd'd(�ZGd)d*�d*e�Zed+k	r�Gd,d-�d-e�ZnGd.d-�d-e�ZiZe�Zd/d/gZ d�d0d1�Z!eZ"ed2d3��Z#dZ$ed�d5d6��Z%d�d7d8�Z&e'dfd9d:�Z(d;d<�Z)e'fd=d>�Z*e'dfd?d@�Z+dAdB�Z,dCdD�Z-dEdF�Z.dGdH�Z/dIdJ�Z0dKdL�Z1dMdN�Z2dOdP�Z3dQdR�Z4dSdT�Z5dUdV�Z6dWdX�Z7dYdZ�Z8d�d[d\�Z9d]d^�Z:d_d`�Z;dadb�Z<dcdd�Z=dedf�Z>dgdh�Z?d�didj�Z@dkdl�ZAdmdn�ZBdodp�ZCdqdr�ZDgfdsdt�ZEd�dudv�ZF�fdwdx�ZGdydz�ZHd{d|�ZId}d~�ZJdd��ZKd�d�d��ZLd�d�d��ZM�ZNS)�r&z)Abstract base level parser element class.z 
	
FcCs
|t_dS)a�
        Overrides the default whitespace chars

        Example::
            # default whitespace chars are space, <TAB> and newline
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
            
            # change to just treat newline as significant
            ParserElement.setDefaultWhitespaceChars(" \t")
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
        N)r&�DEFAULT_WHITE_CHARS��charsryryrz�setDefaultWhitespaceCharsTs
z'ParserElement.setDefaultWhitespaceCharscCs
|t_dS)a�
        Set class to be used for inclusion of string literals into a parser.
        
        Example::
            # default literal class used is Literal
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']


            # change to Suppress
            ParserElement.inlineLiteralsUsing(Suppress)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
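# A runnable sketch of inlineLiteralsUsing(), assuming the standalone `pyparsing`
# distribution is importable.  Note that the setting is class-wide: it changes how
# bare strings are wrapped in every grammar built afterwards.
from pyparsing import ParserElement, Suppress, Word, nums

ParserElement.inlineLiteralsUsing(Suppress)
integer = Word(nums)
date_expr = integer("year") + '/' + integer("month") + '/' + integer("day")
print(date_expr.parseString("1999/12/31"))   # -> ['1999', '12', '31']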
        N)r&�_literalStringClass)r�ryryrz�inlineLiteralsUsingcsz!ParserElement.inlineLiteralsUsingcCs�t�|_d|_d|_d|_||_d|_tj|_	d|_
d|_d|_t�|_
d|_d|_d|_d|_d|_d|_d|_d|_d|_dS)NTFr�)NNN)r��parseAction�
failAction�strRepr�resultsName�
saveAsList�skipWhitespacer&rs�
whiteChars�copyDefaultWhiteChars�mayReturnEmpty�keepTabs�ignoreExprs�debug�streamlined�
mayIndexError�errmsg�modalResults�debugActions�re�callPreparse�
callDuringTry)r��savelistryryrzr�xs(zParserElement.__init__cCs<t�|�}|jdd�|_|jdd�|_|jr8tj|_|S)a$
        Make a copy of this C{ParserElement}.  Useful for defining different parse actions
        for the same parsing pattern, using copies of the original parse element.
        
        Example::
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
            
            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
        prints::
            [5120, 100, 655360, 268435456]
        Equivalent form of C{expr.copy()} is just C{expr()}::
            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
        N)r�ryr�r�r&rsr)r�Zcpyryryrzr��s
zParserElement.copycCs*||_d|j|_t|d�r&|j|j_|S)af
        Define name for this expression, makes debugging and exception messages clearer.
        
        Example::
            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
        �	Expected �	exception)r�r�r�r�r�rryryrz�setName�s


zParserElement.setNamecCs4|��}|�d�r"|dd�}d}||_||_|S)aP
        Define name for referencing matching tokens as a nested attribute
        of the returned parse results.
        NOTE: this returns a *copy* of the original C{ParserElement} object;
        this is so that the client can define a basic element, such as an
        integer, and reference it in multiple places with different names.

        You can also set results names using the abbreviated syntax,
        C{expr("name")} in place of C{expr.setResultsName("name")} - 
        see L{I{__call__}<__call__>}.

        Example::
            date_str = (integer.setResultsName("year") + '/' 
                        + integer.setResultsName("month") + '/' 
                        + integer.setResultsName("day"))

            # equivalent form:
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
        �*NrtT)r��endswithr|r�)r�r��listAllMatchesZnewselfryryrz�setResultsName�s
zParserElement.setResultsNameTcs@|r&|j�d�fdd�	}�|_||_nt|jd�r<|jj|_|S)z�Method to invoke the Python pdb debugger when this element is
           about to be parsed. Set C{breakFlag} to True to enable, False to
           disable.
        Tcsddl}|���||||�Sr�)�pdbZ	set_trace)rRr��	doActions�callPreParser��Z_parseMethodryrz�breaker�sz'ParserElement.setBreak.<locals>.breaker�_originalParseMethod)TT)�_parser�r�)r�Z	breakFlagr�ryr�rz�setBreak�s
zParserElement.setBreakcOs&tttt|���|_|�dd�|_|S)a
        Define one or more actions to perform when successfully matching parse element definition.
        Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
        C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
         - s   = the original string being parsed (see note below)
         - loc = the location of the matching substring
         - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
        If the functions in fns modify the tokens, they can return them as the return
        value from fn, and the modified list of tokens will replace the original.
        Otherwise, fn does not need to return any value.

        Optional keyword arguments:
         - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing

        Note: the default parsing behavior is to expand tabs in the input string
        before starting the parsing process.  See L{I{parseString}<parseString>} for more information
        on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
        consistent view of the parsed string, the parse location, and line and column
        positions within the parsed string.
        
        Example::
            integer = Word(nums)
            date_str = integer + '/' + integer + '/' + integer

            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']

            # use parse action to convert to ints at parse time
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            date_str = integer + '/' + integer + '/' + integer

            # note that integer fields are now ints, not strings
            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
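# A runnable sketch of setParseAction() converting matched tokens at parse time,
# assuming the standalone `pyparsing` distribution is importable.
from pyparsing import Word, nums

integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
date_expr = integer + '/' + integer + '/' + integer
print(date_expr.parseString("1999/12/31"))   # -> [1999, '/', 12, '/', 31]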
        r�F)r��maprrryr�r��r��fnsrryryrzr��s"zParserElement.setParseActioncOs4|jtttt|���7_|jp,|�dd�|_|S)z�
        Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
        
        See examples in L{I{copy}<copy>}.
        r�F)ryr�r�rrr�r�r�ryryrz�addParseActionszParserElement.addParseActioncs^|�dd��|�dd�rtnt�|D] ����fdd�}|j�|�q$|jpV|�dd�|_|S)a�Add a boolean predicate function to expression's list of parse actions. See 
        L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, 
        functions passed to C{addCondition} need to return boolean success/fail of the condition.

        Optional keyword arguments:
         - message = define a custom message to be used in the raised exception
         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
         
        Example::
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            year_int = integer.copy()
            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
            date_str = year_int + '/' + integer + '/' + integer

            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
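# A runnable sketch of addCondition() rejecting otherwise-valid tokens, assuming the
# standalone `pyparsing` distribution is importable.
from pyparsing import Word, nums, ParseException

integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy().addCondition(lambda toks: toks[0] >= 2000,
                                       message="Only support years 2000 and later")
date_expr = year_int + '/' + integer + '/' + integer
try:
    date_expr.parseString("1999/12/31")
except ParseException as pe:
    print(pe)   # -> Only support years 2000 and later (at char 0), (line:1, col:1)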
        �messagezfailed user-defined condition�fatalFcs$tt��|||��s �||���dSr�)r	rrrZ��exc_type�fnr�ryrz�pa&sz&ParserElement.addCondition.<locals>.par�)r�r#r!ryrr�)r�r�rr�ryr�rz�addConditionszParserElement.addConditioncCs
||_|S)aDefine action to perform if parsing fails at this expression.
           Fail acton fn is a callable function that takes the arguments
           C{fn(s,loc,expr,err)} where:
            - s = string being parsed
            - loc = location where expression match was attempted and failed
            - expr = the parse expression that failed
            - err = the exception thrown
           The function returns no value.  It may throw C{L{ParseFatalException}}
           if it is desired to stop parsing immediately.)rz)r�r�ryryrz�
setFailAction-s
zParserElement.setFailActionc	CsNd}|rJd}|jD]4}z|�||�\}}d}qWqtk
rDYqXqq|S�NTF)r�r�r!)r�rRr�Z
exprsFound�eZdummyryryrz�_skipIgnorables:s


zParserElement._skipIgnorablescCsH|jr|�||�}|jrD|j}t|�}||krD|||krD|d7}q&|S�Nr�)r�r�r~rr�)r�rRr�Zwt�instrlenryryrz�preParseGs
zParserElement.preParsecCs|gfSr�ry�r�rRr�r�ryryrz�	parseImplSszParserElement.parseImplcCs|Sr�ry�r�rRr��	tokenlistryryrz�	postParseVszParserElement.postParsec
Cs�|j}|s|jr�|jdr,|jd|||�|rD|jrD|�||�}n|}|}zDz|�|||�\}}Wn(tk
r�t|t|�|j	|��YnXWnXt
k
r�}	z:|jdr�|jd||||	�|jr�|�||||	��W5d}	~	XYnXn�|�r|j�r|�||�}n|}|}|j�s&|t|�k�rjz|�|||�\}}Wn*tk
�rft|t|�|j	|��YnXn|�|||�\}}|�|||�}t
||j|j|jd�}
|j�r�|�s�|j�r�|�rTzN|jD]B}||||
�}|dk	�r�t
||j|j�o�t|t
tf�|jd�}
�q�WnFt
k
�rP}	z&|jd�r>|jd||||	��W5d}	~	XYnXnJ|jD]B}||||
�}|dk	�rZt
||j|j�o�t|t
tf�|jd�}
�qZ|�r�|jd�r�|jd|||||
�||
fS)Nrrs)r�r�r�)r�rzr�r�r�r�r�r!r�r�rr�r�r$r|r}r�ryr�r}r�)r�rRr�r�r�Z	debugging�prelocZtokensStart�tokens�errZ	retTokensr�ryryrz�
_parseNoCacheZst





�

�
zParserElement._parseNoCachecCs@z|j||dd�dWStk
r:t|||j|��YnXdS)NF)r�r)r�r#r!r��r�rRr�ryryrz�tryParse�szParserElement.tryParsec	Cs4z|�||�Wnttfk
r*YdSXdSdS)NFT)r�r!r�r�ryryrz�canParseNext�s
zParserElement.canParseNextc@seZdZdd�ZdS)zParserElement._UnboundedCachecs~i�t�|_���fdd�}�fdd�}�fdd�}�fdd�}t�||�|_t�||�|_t�||�|_t�||�|_dS)	Ncs��|��Sr��r��r�r��cache�not_in_cacheryrzr��sz3ParserElement._UnboundedCache.__init__.<locals>.getcs|�|<dSr�ry�r�rr��r�ryrz�set�sz3ParserElement._UnboundedCache.__init__.<locals>.setcs���dSr��rr�r�ryrzr�sz5ParserElement._UnboundedCache.__init__.<locals>.clearcst��Sr��r�r�r�ryrz�	cache_len�sz9ParserElement._UnboundedCache.__init__.<locals>.cache_len)r�r��types�
MethodTyper�r�rr�)r�r�r�rr�ryr�rzr��sz&ParserElement._UnboundedCache.__init__N�r�r�r�r�ryryryrz�_UnboundedCache�sr�Nc@seZdZdd�ZdS)�ParserElement._FifoCachecs�t�|_�t����fdd�}��fdd�}�fdd�}�fdd�}t�||�|_t�||�|_t�||�|_t�||�|_dS)	Ncs��|��Sr�r�r�r�ryrzr��s�.ParserElement._FifoCache.__init__.<locals>.getcs>|�|<t���kr:z��d�Wqtk
r6YqXqdS�NF)r��popitemr�r�)r��sizeryrzr��s�.ParserElement._FifoCache.__init__.<locals>.setcs���dSr�r�r�r�ryrzr�s�0ParserElement._FifoCache.__init__.<locals>.clearcst��Sr�r�r�r�ryrzr��s�4ParserElement._FifoCache.__init__.<locals>.cache_len)	r�r��_OrderedDictr�r�r�r�rr��r�r�r�r�rr�ry)r�r�r�rzr��s�!ParserElement._FifoCache.__init__Nr�ryryryrz�
_FifoCache�sr�c@seZdZdd�ZdS)r�cs�t�|_�i�t�g�����fdd�}���fdd�}��fdd�}�fdd�}t�||�|_t�||�|_t�||�|_t�||�|_	dS)	Ncs��|��Sr�r�r�r�ryrzr��sr�cs4|�|<t���kr&�����d�q��|�dSr�)r�r�popleftrr�)r��key_fifor�ryrzr��sr�cs������dSr�r�r�)r�r�ryrzr�sr�cst��Sr�r�r�r�ryrzr��sr�)
r�r��collections�dequer�r�r�r�rr�r�ry)r�r�r�r�rzr��sr�Nr�ryryryrzr��srcCsd\}}|||||f}tj��tj}|�|�}	|	|jkr�tj|d7<z|�||||�}	Wn8tk
r�}
z|�||
j	|
j
���W5d}
~
XYn.X|�||	d|	d��f�|	W5QR�Sn@tj|d7<t|	t
�r�|	�|	d|	d��fW5QR�SW5QRXdS)Nrr�r)r&�packrat_cache_lock�
packrat_cacher�r��packrat_cache_statsr�rr�rmr�r�r}rp)r�rRr�r�r�ZHITZMISS�lookupr�r�r�ryryrz�_parseCaches$


zParserElement._parseCachecCs(tj��dgttj�tjdd�<dSr�)r&r�rr�r�ryryryrz�
resetCaches
zParserElement.resetCache�cCs8tjs4dt_|dkr t��t_nt�|�t_tjt_dS)a�Enables "packrat" parsing, which adds memoizing to the parsing logic.
           Repeated parse attempts at the same string location (which happens
           often in many complex grammars) can immediately return a cached value,
           instead of re-executing parsing/validating code.  Memoizing is done of
           both valid results and parsing exceptions.
           
           Parameters:
            - cache_size_limit - (default=C{128}) - if an integer value is provided
              will limit the size of the packrat cache; if None is passed, then
              the cache size will be unbounded; if 0 is passed, the cache will
              be effectively disabled.
            
           This speedup may break existing programs that use parse actions that
           have side-effects.  For this reason, packrat parsing is disabled when
           you first import pyparsing.  To activate the packrat feature, your
           program must call the class method C{ParserElement.enablePackrat()}.  If
           your program uses C{psyco} to "compile as you go", you must call
           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
           Python will crash.  For best results, call C{enablePackrat()} immediately
           after importing pyparsing.
           
           Example::
               import pyparsing
               pyparsing.ParserElement.enablePackrat()
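# A minimal sketch of turning on packrat memoization before building a grammar,
# assuming the standalone `pyparsing` distribution is importable.
import pyparsing

pyparsing.ParserElement.enablePackrat()        # bounded cache (128 entries by default)
# pyparsing.ParserElement.enablePackrat(None)  # unbounded cache, per the notes above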
        TN)r&�_packratEnabledr�r�r�r�r�)Zcache_size_limitryryrz�
enablePackrat%szParserElement.enablePackratc
Cs�t��|js|��|jD]}|��q|js8|��}z<|�|d�\}}|rr|�||�}t	�t
�}|�||�Wn0tk
r�}ztjr��n|�W5d}~XYnX|SdS)aB
        Execute the parse expression with the given string.
        This is the main interface to the client code, once the complete
        expression has been built.

        If you want the grammar to require that the entire input string be
        successfully parsed, then set C{parseAll} to True (equivalent to ending
        the grammar with C{L{StringEnd()}}).

        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
        in order to report proper column numbers in parse actions.
        If the input string contains tabs and
        the grammar uses parse actions that use the C{loc} argument to index into the
        string being parsed, you can ensure you have a consistent view of the input
        string by:
         - calling C{parseWithTabs} on your grammar before calling C{parseString}
           (see L{I{parseWithTabs}<parseWithTabs>})
         - define your parse action using the full C{(s,loc,toks)} signature, and
           reference the input string using the parse action's C{s} argument
         - explicitly expand the tabs in your input string before calling
           C{parseString}
        
        Example::
            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
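# A runnable sketch of parseAll=True requiring the whole input to match, assuming the
# standalone `pyparsing` distribution is importable.
from pyparsing import Word, ParseException

print(Word('a').parseString('aaaaabaaa'))   # -> ['aaaaa'] (trailing text is ignored)
try:
    Word('a').parseString('aaaaabaaa', parseAll=True)
except ParseException as pe:
    print("rejected:", pe)                  # -> Expected end of text ...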
        rN)
r&r�r��
streamliner�r��
expandtabsr�r�rr+r�verbose_stacktrace)r�rR�parseAllr�r�r�ZserXryryrz�parseStringHs$

zParserElement.parseStringc
cs6|js|��|jD]}|��q|js4t|���}t|�}d}|j}|j}t	�
�d}	z�||kr�|	|kr�z |||�}
|||
dd�\}}Wntk
r�|
d}YqZX||kr�|	d7}	||
|fV|r�|||�}
|
|kr�|}q�|d7}q�|}qZ|
d}qZWn4tk
�r0}zt	j
�r�n|�W5d}~XYnXdS)a�
        Scan the input string for expression matches.  Each match will return the
        matching tokens, start location, and end location.  May be called with optional
        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
        C{overlap} is specified, then overlapping matches will be reported.

        Note that the start and end locations are reported relative to the string
        being parsed.  See L{I{parseString}<parseString>} for more information on parsing
        strings with embedded tabs.

        Example::
            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
            print(source)
            for tokens,start,end in Word(alphas).scanString(source):
                print(' '*start + '^'*(end-start))
                print(' '*start + tokens[0])
        
        prints::
        
            sldjf123lsdjjkf345sldkjf879lkjsfd987
            ^^^^^
            sldjf
                    ^^^^^^^
                    lsdjjkf
                              ^^^^^^
                              sldkjf
                                       ^^^^^^
                                       lkjsfd
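# A runnable sketch of scanString() reporting (tokens, start, end) for each match,
# assuming the standalone `pyparsing` distribution is importable.
from pyparsing import Word, alphas

source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
for tokens, start, end in Word(alphas).scanString(source):
    print(tokens[0], start, end)
# sldjf 0 5
# lsdjjkf 8 15
# sldkjf 18 24
# lkjsfd 27 33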
        rF�r�r�N)r�r�r�r�r�r�r�r�r�r&r�r!rr�)r�rR�
maxMatchesZoverlapr�r�r�Z
preparseFnZparseFn�matchesr�ZnextLocr�ZnextlocrXryryrz�
scanStringzsB




zParserElement.scanStringc
Cs�g}d}d|_z�|�|�D]Z\}}}|�|||��|rpt|t�rR||��7}nt|t�rf||7}n
|�|�|}q|�||d��dd�|D�}d�tt	t
|���WStk
r�}ztj
rƂn|�W5d}~XYnXdS)af
        Extension to C{L{scanString}}, to modify matching text with modified tokens that may
        be returned from a parse action.  To use C{transformString}, define a grammar and
        attach a parse action to it that modifies the returned token list.
        Invoking C{transformString()} on a target string will then scan for matches,
        and replace the matched text patterns according to the logic in the parse
        action.  C{transformString()} returns the resulting transformed string.
        
        Example::
            wd = Word(alphas)
            wd.setParseAction(lambda toks: toks[0].title())
            
            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
        Prints::
            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
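# A runnable sketch of transformString() rewriting each match via its parse action,
# assuming the standalone `pyparsing` distribution is importable.
from pyparsing import Word, alphas

wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent"))
# -> Now Is The Winter Of Our Discontent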
        rTNcSsg|]}|r|�qSryry)r��oryryrzr��sz1ParserElement.transformString.<locals>.<listcomp>r�)r�r�rr}r$r�r�r�r�r��_flattenrr&r�)r�rRr*ZlastErxr�r�rXryryrzr��s(



zParserElement.transformStringc
CsRztdd�|�||�D��WStk
rL}ztjr8�n|�W5d}~XYnXdS)a�
        Another extension to C{L{scanString}}, simplifying the access to the tokens found
        to match the given parse expression.  May be called with optional
        C{maxMatches} argument, to clip searching after 'n' matches are found.
        
        Example::
            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
            cap_word = Word(alphas.upper(), alphas.lower())
            
            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))

            # the sum() builtin can be used to merge results into a single ParseResults object
            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
        prints::
            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
        cSsg|]\}}}|�qSryry)r�rxr�r�ryryrzr��sz.ParserElement.searchString.<locals>.<listcomp>N)r$r�rr&r�)r�rRr�rXryryrz�searchString�szParserElement.searchStringc	csTd}d}|j||d�D]*\}}}|||�V|r<|dV|}q||d�VdS)a[
        Generator method to split a string using the given expression as a separator.
        May be called with optional C{maxsplit} argument, to limit the number of splits;
        and the optional C{includeSeparators} argument (default=C{False}), if the separating
        matching text should be included in the split results.
        
        Example::        
            punc = oneOf(list(".,;:/-!?"))
            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
        prints::
            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
        r)r�N)r�)	r�rR�maxsplitZincludeSeparatorsZsplitsZlastrxr�r�ryryrzr�s

zParserElement.splitcCsFt|t�rt�|�}t|t�s:tjdt|�tdd�dSt||g�S)a�
        Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
        converts them to L{Literal}s by default.
        
        Example::
            greet = Word(alphas) + "," + Word(alphas) + "!"
            hello = "Hello, World!"
            print (hello, "->", greet.parseString(hello))
        Prints::
            Hello, World! -> ['Hello', ',', 'World', '!']
        �4Cannot combine element of type %s with ParserElementrs��
stacklevelN)	r}r�r&rw�warnings�warnr��
SyntaxWarningrr"ryryrzrs


�zParserElement.__add__cCsBt|t�rt�|�}t|t�s:tjdt|�tdd�dS||S)z]
        Implementation of + operator when left operand is not a C{L{ParserElement}}
        r�rsr�N�r}r�r&rwr�r�r�r�r"ryryrzr#1s


�zParserElement.__radd__cCsJt|t�rt�|�}t|t�s:tjdt|�tdd�dS|t�	�|S)zQ
        Implementation of - operator, returns C{L{And}} with error stop
        r�rsr�N)
r}r�r&rwr�r�r�r�r�
_ErrorStopr"ryryrz�__sub__=s


�zParserElement.__sub__cCsBt|t�rt�|�}t|t�s:tjdt|�tdd�dS||S)z]
        Implementation of - operator when left operand is not a C{L{ParserElement}}
        r�rsr�Nr�r"ryryrz�__rsub__Is


�zParserElement.__rsub__cs�t|t�r|d}}n�t|t�r�|ddd�}|ddkrHd|df}t|dt�r�|ddkr�|ddkrvt��S|ddkr�t��S�|dt��Sq�t|dt�r�t|dt�r�|\}}||8}q�tdt|d�t|d���ntdt|���|dk�rtd��|dk�rtd	��||k�r6dk�rBnntd
��|�r���fdd��|�r�|dk�rt��|�}nt�g|��|�}n�|�}n|dk�r��}nt�g|�}|S)
a�
        Implementation of * operator, allows use of C{expr * 3} in place of
        C{expr + expr + expr}.  Expressions may also be multiplied by a 2-integer
        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
        may also include C{None} as in:
         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
              to C{expr*n + L{ZeroOrMore}(expr)}
              (read as "at least n instances of C{expr}")
         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
              (read as "0 to n instances of C{expr}")
         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}

        Note that C{expr*(None,n)} does not raise an exception if
        more than n exprs exist in the input stream; that is,
        C{expr*(None,n)} does not enforce a maximum number of expr
        occurrences.  If this behavior is desired, then write
        C{expr*(None,n) + ~expr}
        r)NNNrsr�z7cannot multiply 'ParserElement' and ('%s','%s') objectsz0cannot multiply 'ParserElement' and '%s' objectsz/cannot multiply ParserElement by negative valuez@second tuple value must be greater or equal to first tuple valuez+cannot multiply ParserElement by 0 or (0,0)cs(|dkrt��|d��St��SdSr�)r��n��makeOptionalListr�ryrzr��sz/ParserElement.__mul__.<locals>.makeOptionalList)	r}rv�tupler4rr�r��
ValueErrorr)r�rZminElementsZoptElementsr�ryr�rz�__mul__UsD







zParserElement.__mul__cCs
|�|�Sr�)rr"ryryrz�__rmul__�szParserElement.__rmul__cCsFt|t�rt�|�}t|t�s:tjdt|�tdd�dSt||g�S)zI
        Implementation of | operator - returns C{L{MatchFirst}}
        r�rsr�N)	r}r�r&rwr�r�r�r�rr"ryryrz�__or__�s


�zParserElement.__or__cCsBt|t�rt�|�}t|t�s:tjdt|�tdd�dS||BS)z]
        Implementation of | operator when left operand is not a C{L{ParserElement}}
        r�rsr�Nr�r"ryryrz�__ror__�s


�zParserElement.__ror__cCsFt|t�rt�|�}t|t�s:tjdt|�tdd�dSt||g�S)zA
        Implementation of ^ operator - returns C{L{Or}}
        r�rsr�N)	r}r�r&rwr�r�r�r�rr"ryryrz�__xor__�s


�zParserElement.__xor__cCsBt|t�rt�|�}t|t�s:tjdt|�tdd�dS||AS)z]
        Implementation of ^ operator when left operand is not a C{L{ParserElement}}
        r�rsr�Nr�r"ryryrz�__rxor__�s


�zParserElement.__rxor__cCsFt|t�rt�|�}t|t�s:tjdt|�tdd�dSt||g�S)zC
        Implementation of & operator - returns C{L{Each}}
        r�rsr�N)	r}r�r&rwr�r�r�r�rr"ryryrz�__and__�s


�zParserElement.__and__cCsBt|t�rt�|�}t|t�s:tjdt|�tdd�dS||@S)z]
        Implementation of & operator when left operand is not a C{L{ParserElement}}
        r�rsr�Nr�r"ryryrz�__rand__�s


�zParserElement.__rand__cCst|�S)zE
        Implementation of ~ operator - returns C{L{NotAny}}
        )rr�ryryrz�
__invert__�szParserElement.__invert__cCs|dk	r|�|�S|��SdS)a

        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
        
        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
        passed as C{True}.
           
        If C{name} is omitted, same as calling C{L{copy}}.

        Example::
            # these are equivalent
            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             
        N)r�r�rryryrz�__call__�s
zParserElement.__call__cCst|�S)z�
        Suppresses the output of this C{ParserElement}; useful to keep punctuation from
        cluttering up returned output.
        )r-r�ryryrz�suppress�szParserElement.suppresscCs
d|_|S)a
        Disables the skipping of whitespace before matching the characters in the
        C{ParserElement}'s defined pattern.  This is normally only used internally by
        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
        F�r~r�ryryrz�leaveWhitespaceszParserElement.leaveWhitespacecCsd|_||_d|_|S)z8
        Overrides the default whitespace chars
        TF)r~rr�)r�ruryryrz�setWhitespaceChars
sz ParserElement.setWhitespaceCharscCs
d|_|S)z�
        Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
        Must be called before C{parseString} when the input grammar contains elements that
        match C{<TAB>} characters.
        T)r�r�ryryrz�
parseWithTabsszParserElement.parseWithTabscCsLt|t�rt|�}t|t�r4||jkrH|j�|�n|j�t|����|S)a�
        Define expression to be ignored (e.g., comments) while doing pattern
        matching; may be called repeatedly, to define multiple comment or other
        ignorable patterns.
        
        Example::
            patt = OneOrMore(Word(alphas))
            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
            
            patt.ignore(cStyleComment)
            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
        )r}r�r-r�rr�r"ryryrz�ignores


zParserElement.ignorecCs"|pt|pt|ptf|_d|_|S)zT
        Enable display of debugging messages while doing pattern matching.
        T)rTrWrYr�r�)r�ZstartActionZ
successActionZexceptionActionryryrz�setDebugActions6s�zParserElement.setDebugActionscCs|r|�ttt�nd|_|S)a�
        Enable display of debugging messages while doing pattern matching.
        Set C{flag} to True to enable, False to disable.

        Example::
            wd = Word(alphas).setName("alphaword")
            integer = Word(nums).setName("numword")
            term = wd | integer
            
            # turn on debugging for wd
            wd.setDebug()

            OneOrMore(term).parseString("abc 123 xyz 890")
        
        prints::
            Match alphaword at loc 0(1,1)
            Matched alphaword -> ['abc']
            Match alphaword at loc 3(1,4)
            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
            Match alphaword at loc 7(1,8)
            Matched alphaword -> ['xyz']
            Match alphaword at loc 11(1,12)
            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
            Match alphaword at loc 15(1,16)
            Exception raised:Expected alphaword (at char 15), (line:1, col:16)

        The output shown is that produced by the default debug actions - custom debug actions can be
        specified using L{setDebugActions}. Prior to attempting
        to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
        is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
        message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
        which makes debugging and exception messages easier to understand - for instance, the default
        name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
        F)rrTrWrYr�)r��flagryryrz�setDebug@s#zParserElement.setDebugcCs|jSr�)r�r�ryryrzr�iszParserElement.__str__cCst|�Sr�r�r�ryryrzr�lszParserElement.__repr__cCsd|_d|_|Sr�)r�r{r�ryryrzr�oszParserElement.streamlinecCsdSr�ryr�ryryrz�checkRecursiontszParserElement.checkRecursioncCs|�g�dS)zj
        Check defined expressions for valid structure, check for infinite recursive definitions.
        N)r)r��
validateTraceryryrz�validatewszParserElement.validatecCs�z|��}Wn2tk
r>t|d��}|��}W5QRXYnXz|�||�WStk
r~}ztjrj�n|�W5d}~XYnXdS)z�
        Execute the parse expression on the given file or filename.
        If a filename is specified (instead of a file object),
        the entire file is opened, read, and closed before parsing.
        �rN)�readr��openr�rr&r�)r�Zfile_or_filenamer�Z
file_contents�frXryryrz�	parseFile}szParserElement.parseFilecsHt|t�r"||kp t|�t|�kSt|t�r6|�|�Stt|�|kSdSr�)r}r&�varsr�r��superr"�rmryrz�__eq__�s



zParserElement.__eq__cCs
||kSr�ryr"ryryrz�__ne__�szParserElement.__ne__cCstt|��Sr�)�hash�idr�ryryrz�__hash__�szParserElement.__hash__cCs||kSr�ryr"ryryrz�__req__�szParserElement.__req__cCs
||kSr�ryr"ryryrz�__rne__�szParserElement.__rne__cCs4z|jt|�|d�WdStk
r.YdSXdS)a�
        Method for quick testing of a parser against a test string. Good for simple 
        inline microtests of sub expressions while building up larger parser.
           
        Parameters:
         - testString - to test against this expression for a match
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
            
        Example::
            expr = Word(nums)
            assert expr.matches("100")
        �r�TFN)r�r�r)r�Z
testStringr�ryryrzr��s

zParserElement.matches�#cCs�t|t�r"tttj|������}t|t�r4t|�}g}g}d}	|D�]�}
|dk	r^|�	|
d�sf|rr|
sr|�
|
�qD|
sxqDd�|�|
g}g}z:|
�dd�}
|j
|
|d�}|�
|j|d��|	o�|}	Wn�tk
�rr}
z�t|
t�r�dnd	}d|
k�r*|�
t|
j|
��|�
d
t|
j|
�dd|�n|�
d
|
jd|�|�
d
t|
��|	�o\|}	|
}W5d}
~
XYnDtk
�r�}z$|�
dt|��|	�o�|}	|}W5d}~XYnX|�r�|�r�|�
d	�td�|��|�
|
|f�qD|	|fS)a3
        Execute the parse expression on a series of test strings, showing each
        test, the parsed results or where the parse failed. Quick and easy way to
        run a parse expression against a list of sample strings.
           
        Parameters:
         - tests - a list of separate test strings, or a multiline string of test strings
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests           
         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test 
              string; pass None to disable comment filtering
         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
              if False, only dump nested list
         - printResults - (default=C{True}) prints test output to stdout
         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing

        Returns: a (success, results) tuple, where success indicates that all tests succeeded
        (or failed if C{failureTests} is True), and the results contain a list of lines of each 
        test's output
        
        Example::
            number_expr = pyparsing_common.number.copy()

            result = number_expr.runTests('''
                # unsigned integer
                100
                # negative integer
                -100
                # float with scientific notation
                6.02e23
                # integer with scientific notation
                1e-12
                ''')
            print("Success" if result[0] else "Failed!")

            result = number_expr.runTests('''
                # stray character
                100Z
                # missing leading digit before '.'
                -.100
                # too many '.'
                3.14.159
                ''', failureTests=True)
            print("Success" if result[0] else "Failed!")
        prints::
            # unsigned integer
            100
            [100]

            # negative integer
            -100
            [-100]

            # float with scientific notation
            6.02e23
            [6.02e+23]

            # integer with scientific notation
            1e-12
            [1e-12]

            Success
            
            # stray character
            100Z
               ^
            FAIL: Expected end of text (at char 3), (line:1, col:4)

            # missing leading digit before '.'
            -.100
            ^
            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)

            # too many '.'
            3.14.159
                ^
            FAIL: Expected end of text (at char 4), (line:1, col:5)

            Success

        Each test string must be on a single line. If you want to test a string that spans multiple
        lines, create a test like this::

            expr.runTest(r"this is a test\n of strings that spans \n 3 lines")
        
        (Note that this is a raw string literal, you must include the leading 'r'.)
        TNFr2�\nr%)rDz(FATAL)r�� r��^zFAIL: zFAIL-EXCEPTION: )r}r�r�r�r~r��rstrip�
splitlinesrr�rr�r�r�rArr#rIr�r;rprQ)r�Ztestsr�ZcommentZfullDumpZprintResultsZfailureTestsZ
allResultsZcomments�successrxr*�resultr�r�rXryryrz�runTests�sNW




$


zParserElement.runTests)F)F)T)T)TT)TT)r�)F)N)T)F)T)Tr&TTF)Or�r�r�r�rsr��staticmethodrvrxr�r�r�r�r�r�r�r�r�r�r�r�r�r�r�r�r�r�r�r�r�rr�r�r�r�r�r�r�r��_MAX_INTr�r�r�r�rr#r�r�rrrrrrrrrr	r
rr
rrrrr�r�r�rrrrrr"r#r$r�r.�
__classcell__ryryrrzr&Os�




&




G

"
2G+D
			

)

cs eZdZdZ�fdd�Z�ZS)r.zT
    Abstract C{ParserElement} subclass, for defining atomic matching patterns.
    cstt|�jdd�dS�NF�r�)rr.r�r�rryrzr�@	szToken.__init__�r�r�r�r�r�r1ryryrrzr.<	scs eZdZdZ�fdd�Z�ZS)rz,
    An empty token, will always match.
    cs$tt|���d|_d|_d|_dS)NrTF)rrr�r�r�r�r�rryrzr�H	szEmpty.__init__r4ryryrrzrD	scs*eZdZdZ�fdd�Zddd�Z�ZS)rz(
    A token that will never match.
    cs*tt|���d|_d|_d|_d|_dS)NrTFzUnmatchable token)rrr�r�r�r�r�r�rryrzr�S	s
zNoMatch.__init__TcCst|||j|��dSr�)r!r�r�ryryrzr�Z	szNoMatch.parseImpl)T�r�r�r�r�r�r�r1ryryrrzrO	scs*eZdZdZ�fdd�Zddd�Z�ZS)ra�
    Token to exactly match a specified string.
    
    Example::
        Literal('blah').parseString('blah')  # -> ['blah']
        Literal('blah').parseString('blahfooblah')  # -> ['blah']
        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
    
    For case-insensitive matching, use L{CaselessLiteral}.
    
    For keyword matching (force word break before and after the matched string),
    use L{Keyword} or L{CaselessKeyword}.
    cs�tt|���||_t|�|_z|d|_Wn*tk
rVtj	dt
dd�t|_YnXdt
|j�|_d|j|_d|_d|_dS)Nrz2null string passed to Literal; use Empty() insteadrsr��"%s"r�F)rrr��matchr��matchLen�firstMatchCharr�r�r�r�rrmr�r�r�r�r��r��matchStringrryrzr�l	s
�zLiteral.__init__TcCsJ|||jkr6|jdks&|�|j|�r6||j|jfSt|||j|��dSr�)r9r8�
startswithr7r!r�r�ryryrzr�	s��zLiteral.parseImpl)Tr5ryryrrzr^	s
csLeZdZdZedZd�fdd�	Zddd	�Z�fd
d�Ze	dd
��Z
�ZS)ra\
    Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
    Accepts two optional constructor arguments in addition to the keyword string:
     - C{identChars} is a string of characters that would be valid identifier characters,
          defaulting to all alphanumerics + "_" and "$"
     - C{caseless} allows case-insensitive matching, default is C{False}.
       
    Example::
        Keyword("start").parseString("start")  # -> ['start']
        Keyword("start").parseString("starting")  # -> Exception

    For case-insensitive matching, use L{CaselessKeyword}.
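# A runnable sketch contrasting Literal and Keyword word-boundary behaviour, assuming
# the standalone `pyparsing` distribution is importable.
from pyparsing import Literal, Keyword, ParseException

print(Literal("if").parseString("ifAndOnlyIf"))   # -> ['if'] (prefix match is accepted)
try:
    Keyword("if").parseString("ifAndOnlyIf")
except ParseException:
    print("Keyword('if') requires a word break after 'if'")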
    �_$NFcs�tt|���|dkrtj}||_t|�|_z|d|_Wn$tk
r^t	j
dtdd�YnXd|j|_d|j|_
d|_d|_||_|r�|��|_|��}t|�|_dS)Nrz2null string passed to Keyword; use Empty() insteadrsr�r6r�F)rrr��DEFAULT_KEYWORD_CHARSr7r�r8r9r�r�r�r�r�r�r�r��caseless�upper�
caselessmatchr��
identChars)r�r;rBr?rryrzr��	s*
�
zKeyword.__init__TcCs|jr|||||j���|jkr�|t|�|jksL|||j��|jkr�|dksj||d��|jkr�||j|jfSnv|||jkr�|jdks�|�|j|�r�|t|�|jks�|||j|jkr�|dks�||d|jkr�||j|jfSt	|||j
|��dSr�)r?r8r@rAr�rBr7r9r<r!r�r�ryryrzr��	s4����������zKeyword.parseImplcstt|���}tj|_|Sr�)rrr�r>rB)r�r�rryrzr��	szKeyword.copycCs
|t_dS)z,Overrides the default Keyword chars
        N)rr>rtryryrz�setDefaultKeywordChars�	szKeyword.setDefaultKeywordChars)NF)T)r�r�r�r�r5r>r�r�r�r/rCr1ryryrrzr�	s
cs*eZdZdZ�fdd�Zddd�Z�ZS)r
al
    Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.

    Example::
        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
        
    (Contrast with example for L{CaselessKeyword}.)
    cs6tt|��|���||_d|j|_d|j|_dS)Nz'%s'r�)rr
r�r@�returnStringr�r�r:rryrzr��	szCaselessLiteral.__init__TcCs@||||j���|jkr,||j|jfSt|||j|��dSr�)r8r@r7rDr!r�r�ryryrzr��	szCaselessLiteral.parseImpl)Tr5ryryrrzr
�	s
cs,eZdZdZd�fdd�	Zd	dd�Z�ZS)
r	z�
    Caseless version of L{Keyword}.

    Example::
        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
        
    (Contrast with example for L{CaselessLiteral}.)
    Ncstt|�j||dd�dS)NT�r?)rr	r�)r�r;rBrryrzr��	szCaselessKeyword.__init__TcCsj||||j���|jkrV|t|�|jksF|||j��|jkrV||j|jfSt|||j|��dSr�)r8r@rAr�rBr7r!r�r�ryryrzr��	s��zCaselessKeyword.parseImpl)N)Tr5ryryrrzr	�	scs,eZdZdZd�fdd�	Zd	dd�Z�ZS)
rnax
    A variation on L{Literal} which matches "close" matches, that is, 
    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
     - C{match_string} - string to be matched
     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
    
    The results from a successful parse will contain the matched text from the input string and the following named results:
     - C{mismatches} - a list of the positions within the match_string where mismatches were found
     - C{original} - the original match_string used to compare against the input string
    
    If C{mismatches} is an empty list, then the match was an exact match.
    
    Example::
        patt = CloseMatch("ATCATCGAATGGA")
        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)

        # exact match
        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})

        # close match allowing up to 2 mismatches
        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
    r�csBtt|���||_||_||_d|j|jf|_d|_d|_dS)Nz&Expected %r (with up to %d mismatches)F)	rrnr�r��match_string�
maxMismatchesr�r�r�)r�rFrGrryrzr�

szCloseMatch.__init__TcCs�|}t|�}|t|j�}||kr�|j}d}g}	|j}
tt|||�|j��D]2\}}|\}}
||
krN|	�|�t|	�|
krNq�qN|d}t|||�g�}|j|d<|	|d<||fSt|||j|��dS)Nrr��original�
mismatches)	r�rFrGr�r�rr$r!r�)r�rRr�r��startr��maxlocrFZmatch_stringlocrIrGZs_m�src�mat�resultsryryrzr�
s( 

zCloseMatch.parseImpl)r�)Tr5ryryrrzrn�	s	cs8eZdZdZd
�fdd�	Zdd	d
�Z�fdd�Z�ZS)r1a	
    Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction. An optional
    C{excludeChars} parameter can list characters that might be found in 
    the input C{bodyChars} string; useful to define a word of all printables
    except for one or two characters, for instance.
    
    L{srange} is useful for defining custom character set strings for defining 
    C{Word} expressions, using range notation from regular expression character sets.
    
    A common mistake is to use C{Word} to match a specific literal string, as in 
    C{Word("Address")}. Remember that C{Word} uses the string argument to define
    I{sets} of matchable characters. This expression would match "Add", "AAA",
    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
    To match an exact literal string, use L{Literal} or L{Keyword}.

    pyparsing includes helper strings for building Words:
     - L{alphas}
     - L{nums}
     - L{alphanums}
     - L{hexnums}
     - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
     - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
     - L{printables} (any non-whitespace character)

    Example::
        # a word composed of digits
        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
        
        # a word with a leading capital, and zero or more lowercase
        capital_word = Word(alphas.upper(), alphas.lower())

        # hostnames are alphanumeric, with leading alpha, and '-'
        hostname = Word(alphas, alphanums+'-')
        
        # roman numeral (not a strict parser, accepts invalid mix of characters)
        roman = Word("IVXLCDM")
        
        # any string of non-whitespace characters, except for ','
        csv_value = Word(printables, excludeChars=",")
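# A runnable sketch of Word() matching runs drawn from character sets (not literal
# strings), assuming the standalone `pyparsing` distribution is importable.
from pyparsing import Word, alphas, alphanums, printables, delimitedList

hostname = Word(alphas, alphanums + '-')
print(hostname.parseString("ab-12.example")[0])   # -> ab-12

csv_value = Word(printables, excludeChars=",")
print(delimitedList(csv_value).parseString("dkls,lsdkjf,@!#,213"))
# -> ['dkls', 'lsdkjf', '@!#', '213']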
    Nr�rFcs�tt|����rFd��fdd�|D��}|rFd��fdd�|D��}||_t|�|_|rl||_t|�|_n||_t|�|_|dk|_	|dkr�t
d��||_|dkr�||_nt
|_|dkr�||_||_t|�|_d|j|_d	|_||_d
|j|jk�r�|dk�r�|dk�r�|dk�r�|j|jk�r8dt|j�|_nHt|j�dk�rfdt�|j�t|j�f|_nd
t|j�t|j�f|_|j�r�d|jd|_zt�|j�|_Wntk
�r�d|_YnXdS)Nr�c3s|]}|�kr|VqdSr�ryr���excludeCharsryrzr�`
sz Word.__init__.<locals>.<genexpr>c3s|]}|�kr|VqdSr�ryr�rOryrzr�b
srr�zZcannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permittedr�Fr(z[%s]+z%s[%s]*z	[%s][%s]*z\b)rr1r�r��
initCharsOrigr��	initChars�
bodyCharsOrig�	bodyChars�maxSpecifiedr��minLen�maxLenr0r�r�r�r��	asKeyword�_escapeRegexRangeChars�reStringr�r��escape�compilerp)r�rRrT�min�max�exactrXrPrrOrzr�]
s\



0
����z
Word.__init__Tc
Cs>|jr<|j�||�}|s(t|||j|��|��}||��fS|||jkrZt|||j|��|}|d7}t|�}|j}||j	}t
||�}||kr�|||kr�|d7}q�d}	|||jkr�d}	|jr�||kr�|||kr�d}	|j
�r|dkr�||d|k�s||k�r|||k�rd}	|	�r.t|||j|��||||�fS)Nr�FTr)r�r7r!r��end�grouprRr�rTrWr]rVrUrX)
r�rRr�r�r-rJr�Z	bodycharsrKZthrowExceptionryryrzr��
s6


2zWord.parseImplcsvztt|���WStk
r$YnX|jdkrpdd�}|j|jkr`d||j�||j�f|_nd||j�|_|jS)NcSs$t|�dkr|dd�dS|SdS)N��...r��r�ryryrz�
charsAsStr�
sz Word.__str__.<locals>.charsAsStrz	W:(%s,%s)zW:(%s))rr1r�rpr{rQrS)r�rerryrzr��
s
zWord.__str__)Nr�rrFN)T�r�r�r�r�r�r�r�r1ryryrrzr1.
s.6
#csFeZdZdZee�d��Zd�fdd�	Zddd�Z	�fd	d
�Z
�ZS)
r)a�
    Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as 
    named parse results.

    Example::
        realnum = Regex(r"[+-]?\d+\.\d*")
        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
    z[A-Z]rcs�tt|���t|t�r�|s,tjdtdd�||_||_	zt
�|j|j	�|_
|j|_Wq�t
jk
r�tjd|tdd��Yq�Xn2t|tj�r�||_
t|�|_|_||_	ntd��t|�|_d|j|_d|_d|_d	S)
z�The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.z0null string passed to Regex; use Empty() insteadrsr��$invalid pattern (%s) passed to RegexzCRegex may only be constructed with a string or a compiled RE objectr�FTN)rr)r�r}r�r�r�r��pattern�flagsr�r\rZ�
sre_constants�error�compiledREtyper~r�r�r�r�r�r�)r�rhrirryrzr��
s:
�
�
�
zRegex.__init__TcCs`|j�||�}|s"t|||j|��|��}|��}t|���}|rX|D]}||||<qF||fSr�)r�r7r!r�r`�	groupdictr$ra)r�rRr�r�r-�dr�r�ryryrzr��
szRegex.parseImplcsFztt|���WStk
r$YnX|jdkr@dt|j�|_|jS)NzRe:(%s))rr)r�rpr{r�rhr�rryrzr�
s
z
Regex.__str__)r)T)r�r�r�r�r�r�r\rlr�r�r�r1ryryrrzr)�
s
"

cs8eZdZdZd�fdd�	Zddd�Z�fd	d
�Z�ZS)
r'a�
    Token for matching strings that are delimited by quoting characters.
    
    Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=C{None})
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})

    Example::
        qs = QuotedString('"')
        print(qs.searchString('lsjdf "This is the quote" sldjf'))
        complex_qs = QuotedString('{{', endQuoteChar='}}')
        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
        sql_qs = QuotedString('"', escQuote='""')
        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
    prints::
        [['This is the quote']]
        [['This is the "quote"']]
        [['This is the quote with "embedded" quotes']]
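# A runnable sketch of QuotedString() with a custom end quote and an embedded-quote
# escape, assuming the standalone `pyparsing` distribution is importable.
from pyparsing import QuotedString

complex_qs = QuotedString('{{', endQuoteChar='}}')
print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
# -> [['This is the "quote"']]

sql_qs = QuotedString('"', escQuote='""')
print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
# -> [['This is the quote with "embedded" quotes']]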
    NFTc
sNtt����|��}|s0tjdtdd�t��|dkr>|}n"|��}|s`tjdtdd�t��|�_t	|��_
|d�_|�_t	|��_
|�_|�_|�_|�_|r�tjtjB�_dt��j�t�jd�|dk	r�t|�p�df�_n<d�_dt��j�t�jd�|dk	�rt|��pdf�_t	�j�d	k�rp�jd
d��fdd
�tt	�j�d	dd�D��d7_|�r��jdt�|�7_|�r��jdt�|�7_t��j�d�_�jdt��j�7_zt��j�j��_�j�_Wn0tjk
�r&tjd�jtdd��YnXt ���_!d�j!�_"d�_#d�_$dS)Nz$quoteChar cannot be the empty stringrsr�z'endQuoteChar cannot be the empty stringrz%s(?:[^%s%s]r�z%s(?:[^%s\n\r%s]r�z|(?:z)|(?:c3s4|],}dt��jd|��t�j|�fVqdS)z%s[^%s]N)r�r[�endQuoteCharrYr&r�ryrzr�Xs��z(QuotedString.__init__.<locals>.<genexpr>rt�)z|(?:%s)z|(?:%s.)z(.)z)*%srgr�FT)%rr'r�r�r�r�r��SyntaxError�	quoteCharr��quoteCharLen�firstQuoteCharro�endQuoteCharLen�escChar�escQuote�unquoteResults�convertWhitespaceEscapesr��	MULTILINE�DOTALLrir[rYrhr�r��escCharReplacePatternr\rZrjrkr�r�r�r�r�)r�rrrvrwZ	multilinerxroryrr�rzr�/s|



��
������
zQuotedString.__init__c	Cs�|||jkr|j�||�pd}|s4t|||j|��|��}|��}|jr�||j|j	�}t
|t�r�d|kr�|jr�ddddd�}|�
�D]\}}|�||�}q�|jr�t�|jd|�}|jr�|�|j|j�}||fS)N�\�	r2��
)�\tr'z\fz\rz\g<1>)rtr�r7r!r�r`rarxrsrur}r�ryr�r�rvr�r|rwro)	r�rRr�r�r-r�Zws_mapZwslitZwscharryryrzr�ps* 
�zQuotedString.parseImplcsHztt|���WStk
r$YnX|jdkrBd|j|jf|_|jS)Nz.quoted string, starting with %s ending with %s)rr'r�rpr{rrror�rryrzr��s
zQuotedString.__str__)NNFTNT)Trfryryrrzr'sA
#cs8eZdZdZd�fdd�	Zddd�Z�fd	d
�Z�ZS)
ra�
    Token for matching words composed of characters I{not} in a given set (will
    include whitespace in matched characters if not listed in the provided exclusion set - see example).
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.

    Example::
        # define a comma-separated-value as anything that is not a ','
        csv_value = CharsNotIn(',')
        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
    prints::
        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
    r�rcs�tt|���d|_||_|dkr*td��||_|dkr@||_nt|_|dkrZ||_||_t	|�|_
d|j
|_|jdk|_d|_
dS)NFr�zfcannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permittedrr�)rrr�r~�notCharsr�rVrWr0r�r�r�r�r�)r�r�r]r^r_rryrzr��s 
zCharsNotIn.__init__TcCs�|||jkrt|||j|��|}|d7}|j}t||jt|��}||krb|||krb|d7}qD|||jkr�t|||j|��||||�fSr�)r�r!r�r]rWr�rV)r�rRr�r�rJZnotchars�maxlenryryrzr��s
�
zCharsNotIn.parseImplcsfztt|���WStk
r$YnX|jdkr`t|j�dkrTd|jdd�|_nd|j|_|jS)Nrbz
!W:(%s...)z!W:(%s))rrr�rpr{r�r�r�rryrzr��s
zCharsNotIn.__str__)r�rr)Trfryryrrzr�s
cs<eZdZdZdddddd�Zd�fdd�	Zddd�Z�ZS)r0a�
    Special matching class for matching whitespace.  Normally, whitespace is ignored
    by pyparsing grammars.  This class is included when some whitespace structures
    are significant.  Define with a string containing the whitespace characters to be
    matched; default is C{" \t\r\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{L{Word}} class.
    z<SPC>z<TAB>z<LF>z<CR>z<FF>)r(r~r2r�r� 	
r�rcs�tt����|�_��d��fdd��jD���d�dd��jD���_d�_d�j�_	|�_
|dkrt|�_nt�_|dkr�|�_|�_
dS)Nr�c3s|]}|�jkr|VqdSr�)�
matchWhiter�r�ryrzr��s
z!White.__init__.<locals>.<genexpr>css|]}tj|VqdSr�)r0�	whiteStrsr�ryryrzr��sTr�r)
rr0r�r�r
r�rr�r�r�rVrWr0)r�Zwsr]r^r_rr�rzr��s zWhite.__init__TcCs�|||jkrt|||j|��|}|d7}||j}t|t|��}||krb|||jkrb|d7}qB|||jkr�t|||j|��||||�fSr�)r�r!r�rWr]r�rV)r�rRr�r�rJrKryryrzr�	s

zWhite.parseImpl)r�r�rr)T)r�r�r�r�r�r�r�r1ryryrrzr0�s�cseZdZ�fdd�Z�ZS)�_PositionTokencs(tt|���|jj|_d|_d|_dSr�)rr�r�rmr�r�r�r�r�rryrzr�s
z_PositionToken.__init__�r�r�r�r�r1ryryrrzr�sr�cs2eZdZdZ�fdd�Zdd�Zd	dd�Z�ZS)
rzb
    Token to advance to a specific column of input text; useful for tabular report scraping.
    cstt|���||_dSr�)rrr�r;)r��colnorryrzr�$szGoToColumn.__init__cCs\t||�|jkrXt|�}|jr*|�||�}||krX||��rXt||�|jkrX|d7}q*|Sr�)r;r�r�r��isspace)r�rRr�r�ryryrzr�(s$
zGoToColumn.preParseTcCsDt||�}||jkr"t||d|��||j|}|||�}||fS)NzText not in expected column�r;r!)r�rRr�r�ZthiscolZnewlocr�ryryrzr�1s

zGoToColumn.parseImpl)T)r�r�r�r�r�r�r�r1ryryrrzr s	cs*eZdZdZ�fdd�Zddd�Z�ZS)ra�
    Matches if current position is at the beginning of a line within the parse string
    
    Example::
    
        test = '''        AAA this line
        AAA and this line
          AAA but not this one
        B AAA and definitely not this one
        '''

        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
            print(t)
    
    Prints::
        ['AAA', ' this line']
        ['AAA', ' and this line']    

    cstt|���d|_dS)NzExpected start of line)rrr�r�r�rryrzr�OszLineStart.__init__TcCs*t||�dkr|gfSt|||j|��dSr�)r;r!r�r�ryryrzr�SszLineStart.parseImpl)Tr5ryryrrzr:scs*eZdZdZ�fdd�Zddd�Z�ZS)rzU
    Matches if current position is at the end of a line within the parse string
    cs,tt|���|�tj�dd��d|_dS)Nr2r�zExpected end of line)rrr�r
r&rsr�r�r�rryrzr�\szLineEnd.__init__TcCsb|t|�kr6||dkr$|ddfSt|||j|��n(|t|�krN|dgfSt|||j|��dS)Nr2r��r�r!r�r�ryryrzr�aszLineEnd.parseImpl)Tr5ryryrrzrXscs*eZdZdZ�fdd�Zddd�Z�ZS)r,zM
    Matches if current position is at the beginning of the parse string
    cstt|���d|_dS)NzExpected start of text)rr,r�r�r�rryrzr�pszStringStart.__init__TcCs0|dkr(||�|d�kr(t|||j|��|gfSr�)r�r!r�r�ryryrzr�tszStringStart.parseImpl)Tr5ryryrrzr,lscs*eZdZdZ�fdd�Zddd�Z�ZS)r+zG
    Matches if current position is at the end of the parse string
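    A brief sketch (added for illustration; the import assumes the standalone
    C{pyparsing} distribution)::

        from pyparsing import Word, StringEnd, alphas

        # require the expression to consume the input all the way to the end
        whole_word = Word(alphas) + StringEnd()
        whole_word.parseString("hello")      # succeeds -> ['hello']
        # whole_word.parseString("hello!")   # raises ParseException at the '!'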
    cstt|���d|_dS)NzExpected end of text)rr+r�r�r�rryrzr�szStringEnd.__init__TcCs^|t|�krt|||j|��n<|t|�kr6|dgfS|t|�krJ|gfSt|||j|��dSr�r�r�ryryrzr��szStringEnd.parseImpl)Tr5ryryrrzr+{scs.eZdZdZef�fdd�	Zddd�Z�ZS)r3ap
    Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
    the string being parsed, or at the beginning of a line.
    cs"tt|���t|�|_d|_dS)NzNot at the start of a word)rr3r�r��	wordCharsr��r�r�rryrzr��s
zWordStart.__init__TcCs@|dkr8||d|jks(|||jkr8t|||j|��|gfSr�)r�r!r�r�ryryrzr��s�zWordStart.parseImpl)T�r�r�r�r�rXr�r�r1ryryrrzr3�scs.eZdZdZef�fdd�	Zddd�Z�ZS)r2aZ
    Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
    the string being parsed, or at the end of a line.
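    A combined C{WordStart}/C{WordEnd} sketch (added for illustration; the import assumes
    the standalone C{pyparsing} distribution)::

        from pyparsing import WordStart, WordEnd, Literal, alphanums

        # match 'cat' only as a complete word, roughly like the regex r'\bcat\b'
        cat = WordStart(alphanums) + Literal("cat") + WordEnd(alphanums)
        print(cat.searchString("cat catalog concat bobcat cat"))
        # should match only the two standalone occurrences -> [['cat'], ['cat']]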
    cs(tt|���t|�|_d|_d|_dS)NFzNot at the end of a word)rr2r�r�r�r~r�r�rryrzr��s
zWordEnd.__init__TcCsPt|�}|dkrH||krH|||jks8||d|jkrHt|||j|��|gfSr�)r�r�r!r�)r�rRr�r�r�ryryrzr��s�zWordEnd.parseImpl)Tr�ryryrrzr2�scs�eZdZdZd�fdd�	Zdd�Zdd�Zd	d
�Z�fdd�Z�fd
d�Z	�fdd�Z
d�fdd�	Zgfdd�Z�fdd�Z
�ZS)r"z^
    Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
    Fcs�tt|��|�t|t�r"t|�}t|t�r<t�|�g|_	nht|t
�rxt|�}tdd�|D��rlttj|�}t|�|_	n,zt|�|_	Wnt
k
r�|g|_	YnXd|_dS)Ncss|]}t|t�VqdSr�)r}r�)r�rSryryrzr��sz+ParseExpression.__init__.<locals>.<genexpr>F)rr"r�r}r�r�r�r&rw�exprsr�allr�r�r��r�r�r�rryrzr��s


zParseExpression.__init__cCs
|j|Sr�)r�r�ryryrzr��szParseExpression.__getitem__cCs|j�|�d|_|Sr�)r�rr{r"ryryrzr�szParseExpression.appendcCs0d|_dd�|jD�|_|jD]}|��q|S)z~Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
           all contained expressions.FcSsg|]}|���qSryr�r�r�ryryrzr��sz3ParseExpression.leaveWhitespace.<locals>.<listcomp>)r~r�r)r�r�ryryrzr�s


zParseExpression.leaveWhitespacecsrt|t�rB||jkrntt|��|�|jD]}|�|jd�q*n,tt|��|�|jD]}|�|jd�qX|Sr�)r}r-r�rr"rr�)r�rr�rryrzr�s



zParseExpression.ignorecsNztt|���WStk
r$YnX|jdkrHd|jjt|j�f|_|jS�Nz%s:(%s))	rr"r�rpr{rmr�r�r�r�rryrzr��s
zParseExpression.__str__cs*tt|���|jD]}|��qt|j�dk�r|jd}t||j�r�|js�|jdkr�|j	s�|jdd�|jdg|_d|_
|j|jO_|j|jO_|jd}t||j��r|j�s|jdk�r|j	�s|jdd�|jdd�|_d|_
|j|jO_|j|jO_dt
|�|_|S)Nrsrr�rtr�)rr"r�r�r�r}rmryr|r�r{r�r�r�r�)r�r�rrryrzr��s<


���
���zParseExpression.streamlinecstt|��||�}|Sr�)rr"r�)r�r�r�r�rryrzr�
szParseExpression.setResultsNamecCs6|dd�|g}|jD]}|�|�q|�g�dSr�)r�rr)r�r�tmpr�ryryrzr
s
zParseExpression.validatecs$tt|���}dd�|jD�|_|S)NcSsg|]}|���qSryrr�ryryrzr�%
sz(ParseExpression.copy.<locals>.<listcomp>)rr"r�r�r1rryrzr�#
szParseExpression.copy)F)F)r�r�r�r�r�r�rrrr�r�r�rr�r1ryryrrzr"�s	
"csTeZdZdZGdd�de�Zd�fdd�	Zddd�Zd	d
�Zdd�Z	d
d�Z
�ZS)ra

    Requires all given C{ParseExpression}s to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'+'} operator.
    May also be constructed using the C{'-'} operator, which will suppress backtracking.

    Example::
        integer = Word(nums)
        name_expr = OneOrMore(Word(alphas))

        expr = And([integer("id"),name_expr("name"),integer("age")])
        # more easily written as:
        expr = integer("id") + name_expr("name") + integer("age")
    cseZdZ�fdd�Z�ZS)zAnd._ErrorStopcs&ttj|�j||�d|_|��dS)N�-)rrr�r�r�rrGrryrzr�9
szAnd._ErrorStop.__init__r�ryryrrzr�8
sr�TcsRtt|��||�tdd�|jD��|_|�|jdj�|jdj|_d|_	dS)Ncss|]}|jVqdSr��r�r�ryryrzr�@
szAnd.__init__.<locals>.<genexpr>rT)
rrr�r�r�r�r
rr~r�r�rryrzr�>
s
zAnd.__init__c	Cs�|jdj|||dd�\}}d}|jdd�D]�}t|tj�rDd}q.|r�z|�|||�\}}Wq�tk
rt�Yq�tk
r�}zd|_t�|��W5d}~XYq�t	k
r�t|t
|�|j|��Yq�Xn|�|||�\}}|s�|��r.||7}q.||fS)NrFr�r�T)
r�r�r}rr�r%r�
__traceback__r�r�r�r�r
)	r�rRr�r��
resultlistZ	errorStopr�Z
exprtokensr�ryryrzr�E
s(
z
And.parseImplcCst|t�rt�|�}|�|�Sr��r}r�r&rwrr"ryryrzr!^
s

zAnd.__iadd__cCs6|dd�|g}|jD]}|�|�|jsq2qdSr�)r�rr��r�r��subRecCheckListr�ryryrzrc
s


zAnd.checkRecursioncCs@t|d�r|jS|jdkr:dd�dd�|jD��d|_|jS)Nr��{r(css|]}t|�VqdSr�r�r�ryryrzr�o
szAnd.__str__.<locals>.<genexpr>�}�r�r�r{r�r�r�ryryrzr�j
s


 zAnd.__str__)T)T)r�r�r�r�rr�r�r�r!rr�r1ryryrrzr(
s
csDeZdZdZd�fdd�	Zddd�Zdd	�Zd
d�Zdd
�Z�Z	S)ra�
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the C{'^'} operator.

    Example::
        # construct Or using '^' operator
        
        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789"))
    prints::
        [['123'], ['3.1416'], ['789']]
    Fcs:tt|��||�|jr0tdd�|jD��|_nd|_dS)Ncss|]}|jVqdSr�r�r�ryryrzr��
szOr.__init__.<locals>.<genexpr>T)rrr�r�rBr�r�rryrzr��
szOr.__init__TcCsRd}d}g}|jD]�}z|�||�}Wnvtk
rb}	zd|	_|	j|krR|	}|	j}W5d}	~	XYqtk
r�t|�|kr�t|t|�|j|�}t|�}YqX|�||f�q|�r(|j	dd�d�|D]^\}
}z|�
|||�WStk
�r$}	z d|	_|	j|k�r|	}|	j}W5d}	~	XYq�Xq�|dk	�r@|j|_|�nt||d|��dS)NrtcSs
|dSr�ry)�xryryrzr{�
r|zOr.parseImpl.<locals>.<lambda>)r� no defined alternatives to match)r�r�r!r�r�r�r�r�r�sortr�r�)r�rRr�r��	maxExcLoc�maxExceptionr�r�Zloc2r��_ryryrzr��
s<


zOr.parseImplcCst|t�rt�|�}|�|�Sr�r�r"ryryrz�__ixor__�
s

zOr.__ixor__cCs@t|d�r|jS|jdkr:dd�dd�|jD��d|_|jS)Nr�r�z ^ css|]}t|�VqdSr�r�r�ryryrzr��
szOr.__str__.<locals>.<genexpr>r�r�r�ryryrzr��
s


 z
Or.__str__cCs,|dd�|g}|jD]}|�|�qdSr��r�rr�ryryrzr�
s
zOr.checkRecursion)F)T)
r�r�r�r�r�r�r�r�rr1ryryrrzrt
s

&	csDeZdZdZd�fdd�	Zddd�Zdd	�Zd
d�Zdd
�Z�Z	S)ra�
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the C{'|'} operator.

    Example::
        # construct MatchFirst using '|' operator
        
        # watch the order of expressions to match
        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]

        # put more selective expression first
        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
    Fcs:tt|��||�|jr0tdd�|jD��|_nd|_dS)Ncss|]}|jVqdSr�r�r�ryryrzr��
sz&MatchFirst.__init__.<locals>.<genexpr>T)rrr�r�rBr�r�rryrzr��
szMatchFirst.__init__Tc	Cs�d}d}|jD]�}z|�|||�}|WStk
r`}z|j|krP|}|j}W5d}~XYqtk
r�t|�|kr�t|t|�|j|�}t|�}YqXq|dk	r�|j|_|�nt||d|��dS)Nrtr�)r�r�r!r�r�r�r�r�)	r�rRr�r�r�r�r�r�r�ryryrzr��
s$


zMatchFirst.parseImplcCst|t�rt�|�}|�|�Sr�r�r"ryryrz�__ior__�
s

zMatchFirst.__ior__cCs@t|d�r|jS|jdkr:dd�dd�|jD��d|_|jS)Nr�r�� | css|]}t|�VqdSr�r�r�ryryrzr��
sz%MatchFirst.__str__.<locals>.<genexpr>r�r�r�ryryrzr��
s


 zMatchFirst.__str__cCs,|dd�|g}|jD]}|�|�qdSr�r�r�ryryrzrs
zMatchFirst.checkRecursion)F)T)
r�r�r�r�r�r�r�r�rr1ryryrrzr�
s
	cs<eZdZdZd�fdd�	Zddd�Zdd�Zd	d
�Z�ZS)
ram
    Requires all given C{ParseExpression}s to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'&'} operator.

    Example::
        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
        integer = Word(nums)
        shape_attr = "shape:" + shape_type("shape")
        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
        color_attr = "color:" + color("color")
        size_attr = "size:" + integer("size")

        # use Each (using operator '&') to accept attributes in any order 
        # (shape and posn are required, color and size are optional)
        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)

        shape_spec.runTests('''
            shape: SQUARE color: BLACK posn: 100, 120
            shape: CIRCLE size: 50 color: BLUE posn: 50,80
            color:GREEN size:20 shape:TRIANGLE posn:20,40
            '''
            )
    prints::
        shape: SQUARE color: BLACK posn: 100, 120
        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
        - color: BLACK
        - posn: ['100', ',', '120']
          - x: 100
          - y: 120
        - shape: SQUARE


        shape: CIRCLE size: 50 color: BLUE posn: 50,80
        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
        - color: BLUE
        - posn: ['50', ',', '80']
          - x: 50
          - y: 80
        - shape: CIRCLE
        - size: 50


        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
        - color: GREEN
        - posn: ['20', ',', '40']
          - x: 20
          - y: 40
        - shape: TRIANGLE
        - size: 20
    Tcs8tt|��||�tdd�|jD��|_d|_d|_dS)Ncss|]}|jVqdSr�r�r�ryryrzr�?sz Each.__init__.<locals>.<genexpr>T)rrr�r�r�r�r~�initExprGroupsr�rryrzr�=sz
Each.__init__c	s�|jr�tdd�|jD��|_dd�|jD�}dd�|jD�}|||_dd�|jD�|_dd�|jD�|_dd�|jD�|_|j|j7_d	|_|}|jdd�}|jdd��g}d
}	|	�rj|�|j|j}
g}|
D]v}z|�||�}Wn t	k
�r|�
|�Yq�X|�
|j�t|�|��||k�r@|�
|�q�|�kr܈�
|�q�t|�t|
�kr�d	}	q�|�r�d�dd�|D��}
t	||d
|
��|�fdd�|jD�7}g}|D]"}|�|||�\}}|�
|��q�t|tg��}||fS)Ncss&|]}t|t�rt|j�|fVqdSr�)r}rr!rSr�ryryrzr�Es
z!Each.parseImpl.<locals>.<genexpr>cSsg|]}t|t�r|j�qSry�r}rrSr�ryryrzr�Fs
z"Each.parseImpl.<locals>.<listcomp>cSs g|]}|jrt|t�s|�qSry)r�r}rr�ryryrzr�Gs
cSsg|]}t|t�r|j�qSry)r}r4rSr�ryryrzr�Is
cSsg|]}t|t�r|j�qSry)r}rrSr�ryryrzr�Js
cSs g|]}t|tttf�s|�qSry)r}rr4rr�ryryrzr�KsFTr%css|]}t|�VqdSr�r�r�ryryrzr�fsz*Missing one or more required elements (%s)cs$g|]}t|t�r|j�kr|�qSryr�r��ZtmpOptryrzr�js

)r�r�r�Zopt1mapZ	optionalsZmultioptionalsZ
multirequiredZrequiredr�r!rr�r!�remover�r�r��sumr$)r�rRr�r�Zopt1Zopt2ZtmpLocZtmpReqdZ
matchOrderZkeepMatchingZtmpExprsZfailedr�Zmissingr�rNZfinalResultsryr�rzr�CsP

zEach.parseImplcCs@t|d�r|jS|jdkr:dd�dd�|jD��d|_|jS)Nr�r�z & css|]}t|�VqdSr�r�r�ryryrzr�yszEach.__str__.<locals>.<genexpr>r�r�r�ryryrzr�ts


 zEach.__str__cCs,|dd�|g}|jD]}|�|�qdSr�r�r�ryryrzr}s
zEach.checkRecursion)T)T)	r�r�r�r�r�r�r�rr1ryryrrzrs
5
1	csleZdZdZd�fdd�	Zddd�Zdd	�Z�fd
d�Z�fdd
�Zdd�Z	gfdd�Z
�fdd�Z�ZS)r za
    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
    Fcs�tt|��|�t|t�r@ttjt�r2t�|�}nt�t	|��}||_
d|_|dk	r�|j|_|j
|_
|�|j�|j|_|j|_|j|_|j�|j�dSr�)rr r�r}r��
issubclassr&rwr.rrSr{r�r�r
rr~r}r�r�r�r�rSr�rryrzr��s
zParseElementEnhance.__init__TcCs2|jdk	r|jj|||dd�Std||j|��dS)NFr�r�)rSr�r!r�r�ryryrzr��s
zParseElementEnhance.parseImplcCs*d|_|j��|_|jdk	r&|j��|Sr�)r~rSr�rr�ryryrzr�s


z#ParseElementEnhance.leaveWhitespacecsrt|t�rB||jkrntt|��|�|jdk	rn|j�|jd�n,tt|��|�|jdk	rn|j�|jd�|Sr�)r}r-r�rr rrSr"rryrzr�s



zParseElementEnhance.ignorecs&tt|���|jdk	r"|j��|Sr�)rr r�rSr�rryrzr��s

zParseElementEnhance.streamlinecCsB||krt||g��|dd�|g}|jdk	r>|j�|�dSr�)r(rSr)r�r�r�ryryrzr�s

z"ParseElementEnhance.checkRecursioncCs6|dd�|g}|jdk	r(|j�|�|�g�dSr��rSrr�r�rr�ryryrzr�s
zParseElementEnhance.validatecsXztt|���WStk
r$YnX|jdkrR|jdk	rRd|jjt|j�f|_|jSr�)	rr r�rpr{rSrmr�r�r�rryrzr��szParseElementEnhance.__str__)F)T)
r�r�r�r�r�r�rrr�rrr�r1ryryrrzr �s
cs*eZdZdZ�fdd�Zddd�Z�ZS)ra�
    Lookahead matching of the given parse expression.  C{FollowedBy}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression matches at the current
    position.  C{FollowedBy} always returns a null token list.

    Example::
        # use FollowedBy to match a label only if it is followed by a ':'
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        
        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
    prints::
        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
    cstt|��|�d|_dSr�)rrr�r��r�rSrryrzr��szFollowedBy.__init__TcCs|j�||�|gfSr�)rSr�r�ryryrzr��szFollowedBy.parseImpl)Tr5ryryrrzr�scs2eZdZdZ�fdd�Zd	dd�Zdd�Z�ZS)
ra�
    Lookahead to disallow matching with the given parse expression.  C{NotAny}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression does I{not} match at the current
    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
    always returns a null token list.  May be constructed using the '~' operator.

    Example::
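        # a minimal illustrative sketch (added here; the original docstring left this
        # example blank) - the import assumes the standalone pyparsing distribution
        from pyparsing import CaselessKeyword, Word, alphas

        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
        # '~expr' is shorthand for NotAny(expr): refuse to parse an identifier
        # at a position where one of the keywords appears
        identifier = ~(AND | OR | NOT) + Word(alphas)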
        
    cs0tt|��|�d|_d|_dt|j�|_dS)NFTzFound unwanted token, )rrr�r~r�r�rSr�r�rryrzr��szNotAny.__init__TcCs&|j�||�rt|||j|��|gfSr�)rSr�r!r�r�ryryrzr��szNotAny.parseImplcCs4t|d�r|jS|jdkr.dt|j�d|_|jS)Nr�z~{r��r�r�r{r�rSr�ryryrzr�s


zNotAny.__str__)Trfryryrrzr�s

cs(eZdZd�fdd�	Zddd�Z�ZS)	�_MultipleMatchNcsFtt|��|�d|_|}t|t�r.t�|�}|dk	r<|nd|_dSr�)	rr�r�r}r}r�r&rw�	not_ender)r�rS�stopOnZenderrryrzr�s

z_MultipleMatch.__init__Tc	Cs�|jj}|j}|jdk	}|r$|jj}|r2|||�||||dd�\}}zV|j}	|r`|||�|	rp|||�}
n|}
|||
|�\}}|s�|��rR||7}qRWnttfk
r�YnX||fS�NFr�)	rSr�r�r�r�r�r
r!r�)r�rRr�r�Zself_expr_parseZself_skip_ignorablesZcheck_enderZ
try_not_enderr�ZhasIgnoreExprsr�Z	tmptokensryryrzr�s*



z_MultipleMatch.parseImpl)N)T)r�r�r�r�r�r1ryryrrzr�
sr�c@seZdZdZdd�ZdS)ra�
    Repetition of one or more of the given expression.
    
    Parameters:
     - expr - expression that must match one or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition 
          expression)          

    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: BLACK"
        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]

        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
        
        # could also be written as
        (attr_expr * (1,)).parseString(text).pprint()
    cCs4t|d�r|jS|jdkr.dt|j�d|_|jS)Nr�r�z}...r�r�ryryrzr�Js


zOneOrMore.__str__N)r�r�r�r�r�ryryryrzr0scs8eZdZdZd
�fdd�	Zd�fdd�	Zdd	�Z�ZS)r4aw
    Optional repetition of zero or more of the given expression.
    
    Parameters:
     - expr - expression that must match zero or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition 
          expression)          

    Example: similar to L{OneOrMore}
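    A concrete sketch (added for illustration; the import assumes the standalone
    C{pyparsing} distribution)::

        from pyparsing import Word, ZeroOrMore, alphas, nums

        # a command name optionally followed by any number of numeric arguments
        command = Word(alphas) + ZeroOrMore(Word(nums))
        print(command.parseString("translate 10 20"))   # should print ['translate', '10', '20']
        print(command.parseString("reset"))             # should print ['reset']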
    Ncstt|�j||d�d|_dS)N)r�T)rr4r�r�)r�rSr�rryrzr�_szZeroOrMore.__init__Tc	s<ztt|��|||�WSttfk
r6|gfYSXdSr�)rr4r�r!r�r�rryrzr�cszZeroOrMore.parseImplcCs4t|d�r|jS|jdkr.dt|j�d|_|jS)Nr�r$�]...r�r�ryryrzr�is


zZeroOrMore.__str__)N)Trfryryrrzr4Ssc@s eZdZdd�ZeZdd�ZdS)�
_NullTokencCsdSr�ryr�ryryrzr�ssz_NullToken.__bool__cCsdSr�ryr�ryryrzr�vsz_NullToken.__str__N)r�r�r�r�rLr�ryryryrzr�rsr�cs6eZdZdZef�fdd�	Zd	dd�Zdd�Z�ZS)
raa
    Optional matching of the given expression.

    Parameters:
     - expr - expression to be matched optionally (it will match at most once)
     - default (optional) - value to be returned if the optional expression is not found.

    Example::
        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
        zip.runTests('''
            # traditional ZIP code
            12345
            
            # ZIP+4 form
            12101-0001
            
            # invalid ZIP
            98765-
            ''')
    prints::
        # traditional ZIP code
        12345
        ['12345']

        # ZIP+4 form
        12101-0001
        ['12101-0001']

        # invalid ZIP
        98765-
             ^
        FAIL: Expected end of text (at char 5), (line:1, col:6)
    cs.tt|�j|dd�|jj|_||_d|_dS)NFr3T)rrr�rSr}rr�)r�rSrrryrzr��s
zOptional.__init__Tc	Cszz|jj|||dd�\}}WnTttfk
rp|jtk	rh|jjr^t|jg�}|j||jj<ql|jg}ng}YnX||fSr�)rSr�r!r�r�_optionalNotMatchedr|r$)r�rRr�r�r�ryryrzr��s


zOptional.parseImplcCs4t|d�r|jS|jdkr.dt|j�d|_|jS)Nr�r$r'r�r�ryryrzr��s


zOptional.__str__)T)	r�r�r�r�r�r�r�r�r1ryryrrzrzs"
cs,eZdZdZd	�fdd�	Zd
dd�Z�ZS)r*a�	
    Token for skipping over all undefined text until the matched expression is found.

    Parameters:
     - expr - target expression marking the end of the data to be skipped
     - include - (default=C{False}) if True, the target expression is also parsed 
          (the skipped text and target expression are returned as a 2-element list).
     - ignore - (default=C{None}) used to define grammars (typically quoted strings and 
          comments) that might contain false matches to the target expression
     - failOn - (default=C{None}) define expressions that are not allowed to be 
          included in the skipped text; if found before the target expression is found, 
          the SkipTo is not a match

    Example::
        report = '''
            Outstanding Issues Report - 1 Jan 2000

               # | Severity | Description                               |  Days Open
            -----+----------+-------------------------------------------+-----------
             101 | Critical | Intermittent system crash                 |          6
              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
              79 | Minor    | System slow when running too many reports |         47
            '''
        integer = Word(nums)
        SEP = Suppress('|')
        # use SkipTo to simply match everything up until the next SEP
        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
        # - parse action will call token.strip() for each matched token, i.e., the description body
        string_data = SkipTo(SEP, ignore=quotedString)
        string_data.setParseAction(tokenMap(str.strip))
        ticket_expr = (integer("issue_num") + SEP 
                      + string_data("sev") + SEP 
                      + string_data("desc") + SEP 
                      + integer("days_open"))
        
        for tkt in ticket_expr.searchString(report):
            print(tkt.dump())
    prints::
        ['101', 'Critical', 'Intermittent system crash', '6']
        - days_open: 6
        - desc: Intermittent system crash
        - issue_num: 101
        - sev: Critical
        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
        - days_open: 14
        - desc: Spelling error on Login ('log|n')
        - issue_num: 94
        - sev: Cosmetic
        ['79', 'Minor', 'System slow when running too many reports', '47']
        - days_open: 47
        - desc: System slow when running too many reports
        - issue_num: 79
        - sev: Minor
    FNcs`tt|��|�||_d|_d|_||_d|_t|t	�rFt
�|�|_n||_dt
|j�|_dS)NTFzNo match found for )rr*r��
ignoreExprr�r��includeMatchr�r}r�r&rw�failOnr�rSr�)r�rZincluderr�rryrzr��s
zSkipTo.__init__Tc	Cs&|}t|�}|j}|jj}|jdk	r,|jjnd}|jdk	rB|jjnd}	|}
|
|kr�|dk	rf|||
�rfq�|	dk	r�z|	||
�}
Wqntk
r�Yq�YqnXqnz|||
ddd�Wq�tt	fk
r�|
d7}
YqJXq�qJt|||j
|��|
}|||�}t|�}|j�r||||dd�\}}
||
7}||fS)NF)r�r�r�r�)
r�rSr�r�r�r�r�rr!r�r�r$r�)r�rRr�r�rUr�rSZ
expr_parseZself_failOn_canParseNextZself_ignoreExpr_tryParseZtmplocZskiptextZ
skipresultrMryryrzr��s:
zSkipTo.parseImpl)FNN)Tr5ryryrrzr*�s6
csbeZdZdZd�fdd�	Zdd�Zdd�Zd	d
�Zdd�Zgfd
d�Z	dd�Z
�fdd�Z�ZS)raK
    Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.

    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
        fwdExpr << a | b | c
    will actually be evaluated as::
        (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    explicitly group the values inserted into the C{Forward}::
        fwdExpr << (a | b | c)
    Converting to use the '<<=' operator instead will avoid this problem.

    See L{ParseResults.pprint} for an example of a recursive parser created using
    C{Forward}.
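    A small recursive-grammar sketch (added for illustration; the import assumes the
    standalone C{pyparsing} distribution)::

        from pyparsing import Forward, Group, Optional, Suppress, Word, delimitedList, nums

        # nested, parenthesized lists of integers, e.g. "(1, (2, 3), 4)"
        LPAR, RPAR = map(Suppress, "()")
        value = Forward()
        item = Word(nums) | value
        value <<= Group(LPAR + Optional(delimitedList(item)) + RPAR)
        print(value.parseString("(1, (2, 3), 4)"))   # should print [['1', ['2', '3'], '4']]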
    Ncstt|�j|dd�dSr2)rrr�r"rryrzr�@szForward.__init__cCsjt|t�rt�|�}||_d|_|jj|_|jj|_|�|jj	�|jj
|_
|jj|_|j�
|jj�|Sr�)r}r�r&rwrSr{r�r�r
rr~r}r�rr"ryryrz�
__lshift__Cs





zForward.__lshift__cCs||>Sr�ryr"ryryrz�__ilshift__PszForward.__ilshift__cCs
d|_|Sr�rr�ryryrzrSszForward.leaveWhitespacecCs$|js d|_|jdk	r |j��|Sr�)r�rSr�r�ryryrzr�Ws


zForward.streamlinecCs>||kr0|dd�|g}|jdk	r0|j�|�|�g�dSr�r�r�ryryrzr^s

zForward.validatecCsVt|d�r|jS|jjdSz|jdk	r4t|j�}nd}W5|j|_X|jjd|S)Nr�z: ...�Nonez: )r�r�rmr�Z_revertClass�_ForwardNoRecurserSr�)r�Z	retStringryryrzr�es


zForward.__str__cs.|jdk	rtt|���St�}||K}|SdSr�)rSrrr�r1rryrzr�vs

zForward.copy)N)
r�r�r�r�r�r�r�rr�rr�r�r1ryryrrzr-s
c@seZdZdd�ZdS)r�cCsdS)Nrcryr�ryryrzr�sz_ForwardNoRecurse.__str__N)r�r�r�r�ryryryrzr�~sr�cs"eZdZdZd�fdd�	Z�ZS)r/zQ
    Abstract subclass of C{ParseExpression}, for converting parsed results.
    Fcstt|��|�d|_dSr�)rr/r�r}r�rryrzr��szTokenConverter.__init__)Fr4ryryrrzr/�scs6eZdZdZd
�fdd�	Z�fdd�Zdd	�Z�ZS)ra�
    Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the input string;
    this can be disabled by specifying C{'adjacent=False'} in the constructor.

    Example::
        real = Word(nums) + '.' + Word(nums)
        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
        # will also erroneously match the following
        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']

        real = Combine(Word(nums) + '.' + Word(nums))
        print(real.parseString('3.1416')) # -> ['3.1416']
        # no match when there are internal spaces
        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
    r�Tcs8tt|��|�|r|��||_d|_||_d|_dSr�)rrr�r�adjacentr~�
joinStringr�)r�rSr�r�rryrzr��szCombine.__init__cs(|jrt�||�ntt|��|�|Sr�)r�r&rrrr"rryrzr�szCombine.ignorecCsP|��}|dd�=|td�|�|j��g|jd�7}|jrH|��rH|gS|SdS)Nr�)r�)r�r$r�r(r�r�r|r
)r�rRr�r�ZretToksryryrzr��s
"zCombine.postParse)r�T)r�r�r�r�r�rr�r1ryryrrzr�s
cs(eZdZdZ�fdd�Zdd�Z�ZS)ra�
    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.

    Example::
        ident = Word(alphas)
        num = Word(nums)
        term = ident | num
        func = ident + Optional(delimitedList(term))
        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']

        func = ident + Group(Optional(delimitedList(term)))
        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
    cstt|��|�d|_dSr�)rrr�r}r�rryrzr��szGroup.__init__cCs|gSr�ryr�ryryrzr��szGroup.postParse�r�r�r�r�r�r�r1ryryrrzr�s
cs(eZdZdZ�fdd�Zdd�Z�ZS)r
aW
    Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as an item key.

    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        
        # print attributes as plain groups
        print(OneOrMore(attr_expr).parseString(text).dump())
        
        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
        print(result.dump())
        
        # access named fields as dict entries, or output as dict
        print(result['shape'])        
        print(result.asDict())
    prints::
        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']

        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
    See more examples at L{ParseResults} of accessing fields by results name.
    cstt|��|�d|_dSr�)rr
r�r}r�rryrzr��sz
Dict.__init__cCs�t|�D]�\}}t|�dkrq|d}t|t�r@t|d���}t|�dkr\td|�||<qt|�dkr�t|dt�s�t|d|�||<q|��}|d=t|�dks�t|t�r�|�	�r�t||�||<qt|d|�||<q|j
r�|gS|SdS)Nrr�r�rs)r�r�r}rvr�r�r�r$r�r
r|)r�rRr�r�r��tokZikeyZ	dictvalueryryrzr��s$
zDict.postParser�ryryrrzr
�s#c@s eZdZdZdd�Zdd�ZdS)r-aV
    Converter for ignoring the results of a parsed expression.

    Example::
        source = "a, b, c,d"
        wd = Word(alphas)
        wd_list1 = wd + ZeroOrMore(',' + wd)
        print(wd_list1.parseString(source))

        # often, delimiters that are useful during parsing are just in the
        # way afterward - use Suppress to keep them out of the parsed output
        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
        print(wd_list2.parseString(source))
    prints::
        ['a', ',', 'b', ',', 'c', ',', 'd']
        ['a', 'b', 'c', 'd']
    (See also L{delimitedList}.)
    cCsgSr�ryr�ryryrzr�szSuppress.postParsecCs|Sr�ryr�ryryrzr
"szSuppress.suppressN)r�r�r�r�r�r
ryryryrzr-sc@s(eZdZdZdd�Zdd�Zdd�ZdS)	rzI
    Wrapper for parse actions, to ensure they are only called once.
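    A minimal sketch (added for illustration; the import assumes the standalone
    C{pyparsing} distribution)::

        from pyparsing import OnlyOnce, Word, alphas

        def announce(tokens):
            print("first match:", tokens)

        wd = Word(alphas).setParseAction(OnlyOnce(announce))
        print(wd.searchString("one two three"))
        # the wrapped action fires for 'one' only; later attempts raise inside the
        # action and those matches are skipped, so this should print [['one']]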
    cCst|�|_d|_dSr�)rr�callable�called)r�Z
methodCallryryrzr�*s
zOnlyOnce.__init__cCs.|js|�|||�}d|_|St||d��dS)NTr�)r�r�r!)r�r�r[rxrNryryrzr	-s
zOnlyOnce.__call__cCs
d|_dSr�)r�r�ryryrz�reset3szOnlyOnce.resetN)r�r�r�r�r�r	r�ryryryrzr&scs:t����fdd�}z�j|_Wntk
r4YnX|S)at
    Decorator for debugging parse actions. 
    
    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.

    Example::
        wd = Word(alphas)

        @traceParseAction
        def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))

        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
    prints::
        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
        <<leaving remove_duplicate_chars (ret: 'dfjkls')
        ['dfjkls']
    c
s��j}|dd�\}}}t|�dkr8|djjd|}tj�d|t||�||f�z�|�}Wn8tk
r�}ztj�d||f��W5d}~XYnXtj�d||f�|S)Nr^rqr�.z">>entering %s(line: '%s', %d, %r)
z<<leaving %s (exception: %s)
z<<leaving %s (ret: %r)
)r�r�rmr��stderr�writerIrp)ZpaArgsZthisFuncr�r[rxr�rX�rryrz�zLsztraceParseAction.<locals>.z)rrr�r�)rr�ryr�rzrd6s
�,FcCs`t|�dt|�dt|�d}|rBt|t||���|�S|tt|�|��|�SdS)a�
    Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace, and
    comments, but this can be overridden by passing C{combine=True} in the constructor.
    If C{combine} is set to C{True}, the matching tokens are returned as a single token
    string, with the delimiters included; otherwise, the matching tokens are returned
    as a list of tokens, with the delimiters suppressed.

    Example::
        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
    z [r(r�N)r�rr4r�r-)rSZdelim�combineZdlNameryryrzrBbs
$csjt����fdd�}|dkr0tt��dd��}n|��}|�d�|j|dd�|��d	t��d
�S)a:
    Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    
    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.

    Example::
        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']

        # in this parser, the leading integer value is given in binary,
        # '10' indicating that 2 values are in the array
        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
    cs.|d}�|r tt�g|��p&tt�>gSr�)rrrE)r�r[rxr��Z	arrayExprrSryrz�countFieldParseAction�s"z+countedArray.<locals>.countFieldParseActionNcSst|d�Sr�)rvrwryryrzr{�r|zcountedArray.<locals>.<lambda>ZarrayLenT�r�z(len) rc)rr1rTr�r�r�r�r�)rSZintExprr�ryr�rzr>us
cCs6g}|D](}t|t�r&|�t|��q|�|�q|Sr�)r}r�rr�r)�Lr�r�ryryrzr��s
r�cs6t���fdd�}|j|dd���dt|���S)a*
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
    If this is not desired, use C{matchPreviousExpr}.
    Do I{not} use with packrat parsing enabled.
    csP|rBt|�dkr�|d>qLt|���}�tdd�|D��>n
�t�>dS)Nr�rcss|]}t|�VqdSr�)r�r�Zttryryrzr��szDmatchPreviousLiteral.<locals>.copyTokenToRepeater.<locals>.<genexpr>)r�r�r�rr)r�r[rxZtflat�Zrepryrz�copyTokenToRepeater�sz1matchPreviousLiteral.<locals>.copyTokenToRepeaterTr��(prev) )rr�r�r�)rSr�ryr�rzrQ�s


csFt��|��}�|K��fdd�}|j|dd���dt|���S)aS
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
    expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do I{not} use with packrat parsing enabled.
    cs*t|�����fdd�}�j|dd�dS)Ncs$t|���}|�kr tddd��dS)Nr�r)r�r�r!)r�r[rxZtheseTokens�ZmatchTokensryrz�mustMatchTheseTokens�szLmatchPreviousExpr.<locals>.copyTokenToRepeater.<locals>.mustMatchTheseTokensTr�)r�r�r�)r�r[rxr�r�r�rzr��sz.matchPreviousExpr.<locals>.copyTokenToRepeaterTr�r�)rr�r�r�r�)rSZe2r�ryr�rzrP�scCs:dD]}|�|t|�}q|�dd�}|�dd�}t|�S)Nz\^-]r2r'r~r�)r��_bslashr�)r�r�ryryrzrY�s
rYTc
s�|rdd�}dd�}t�ndd�}dd�}t�g}t|t�rF|��}n$t|t�rZt|�}ntjdt	dd�|stt
�Sd	}|t|�d
k�r||}t||d
d��D]R\}}	||	|�r�|||d
=qxq�|||	�r�|||d
=|�
||	�|	}qxq�|d
7}qx|�s�|�r�zlt|�td�|��k�rTtd
d�dd�|D����d�|��WStd�dd�|D����d�|��WSWn&tk
�r�tjdt	dd�YnXt�fdd�|D���d�|��S)a�
    Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a C{L{MatchFirst}} for best performance.

    Parameters:
     - strs - a string of space-delimited literals, or a collection of string literals
     - caseless - (default=C{False}) - treat all literals as caseless
     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
          if creating a C{Regex} raises an exception)

    Example::
        comp_oper = oneOf("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
    prints::
        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    cSs|��|��kSr�)r@�r�bryryrzr{�r|zoneOf.<locals>.<lambda>cSs|���|���Sr�)r@r<r�ryryrzr{�r|cSs||kSr�ryr�ryryrzr{�r|cSs
|�|�Sr�)r<r�ryryrzr{�r|z6Invalid argument to oneOf, expected string or iterablersr�rr�Nr�z[%s]css|]}t|�VqdSr�)rY�r�Zsymryryrzr�szoneOf.<locals>.<genexpr>r��|css|]}t�|�VqdSr�)r�r[r�ryryrzr�sz7Exception creating Regex for oneOf, building MatchFirstc3s|]}�|�VqdSr�ryr��ZparseElementClassryrzr�$s)r
rr}r�r�rr�r�r�r�rr�r�rr�r)r�rpr)
Zstrsr?ZuseRegexZisequalZmasksZsymbolsr�Zcurr�rryr�rzrU�sT



�


**�cCsttt||���S)a�
    Helper to easily and clearly define a dictionary by specifying the respective patterns
    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
    in the proper order.  The key pattern can include delimiting markers or punctuation,
    as long as they are suppressed, thereby leaving the significant key text.  The value
    pattern can include named results, so that the C{Dict} results can include named token
    fields.

    Example::
        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        print(OneOrMore(attr_expr).parseString(text).dump())
        
        attr_label = label
        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)

        # similar to Dict, but simpler call format
        result = dictOf(attr_label, attr_value).parseString(text)
        print(result.dump())
        print(result['shape'])
        print(result.shape)  # object attribute access works too
        print(result.asDict())
    prints::
        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        SQUARE
        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
    )r
r4r)rr�ryryrzrC&s!cCs^t��dd��}|��}d|_|d�||d�}|r@dd�}ndd�}|�|�|j|_|S)	a�
    Helper to return the original, untokenized text for a given expression.  Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns a string containing the original parsed text.
       
    If the optional C{asString} argument is passed as C{False}, then the return value is a 
    C{L{ParseResults}} containing any results names that were originally matched, and a 
    single token containing the original matched text from the input string.  So if 
    the expression passed to C{L{originalTextFor}} contains expressions with defined
    results names, you must set C{asString} to C{False} if you want to preserve those
    results name values.

    Example::
        src = "this is test <b> bold <i>text</i> </b> normal text "
        for tag in ("b","i"):
            opener,closer = makeHTMLTags(tag)
            patt = originalTextFor(opener + SkipTo(closer) + closer)
            print(patt.searchString(src)[0])
    prints::
        ['<b> bold <i>text</i> </b>']
        ['<i>text</i>']
    cSs|Sr�ry)r�r�rxryryrzr{ar|z!originalTextFor.<locals>.<lambda>F�_original_start�
_original_endcSs||j|j�Sr�)r�r�rZryryrzr{fr|cSs&||�d�|�d��g|dd�<dS)Nr�r�)rrZryryrz�extractTexthsz$originalTextFor.<locals>.extractText)rr�r�r�r�)rSZasStringZ	locMarkerZendlocMarker�	matchExprr�ryryrzriIs

cCst|��dd��S)zp
    Helper to undo pyparsing's default grouping of And expressions, even
    if all but one are non-empty.
    cSs|dSr�ryrwryryrzr{sr|zungroup.<locals>.<lambda>)r/r�)rSryryrzrjnscCs4t��dd��}t|d�|d�|����d��S)a�
    Helper to decorate a returned token with its starting and ending locations in the input string.
    This helper adds the following results names:
     - locn_start = location where matched expression begins
     - locn_end = location where matched expression ends
     - value = the actual parsed results

    Be careful if the input text contains C{<TAB>} characters; you may want to call
    C{L{ParserElement.parseWithTabs}}

    Example::
        wd = Word(alphas)
        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
            print(match)
    prints::
        [[0, 'ljsdf', 5]]
        [[8, 'lksdjjf', 15]]
        [[18, 'lkkjj', 23]]
    cSs|Sr�ryrZryryrzr{�r|zlocatedExpr.<locals>.<lambda>Z
locn_startr�Zlocn_end)rr�rr�r)rSZlocatorryryrzrlusz\[]-*.$+^?()~ �r_cCs|ddSr�ryrZryryrzr{�r|r{z\\0?[xX][0-9a-fA-F]+cCstt|d�d�d��S)Nrz\0x�)�unichrrv�lstriprZryryrzr{�r|z	\\0[0-7]+cCstt|ddd�d��S)Nrr��)r�rvrZryryrzr{�r|z\]r�r$r)�negate�bodyr'csFdd��z"d��fdd�t�|�jD��WStk
r@YdSXdS)a�
    Helper to easily define string ranges for use in Word construction.  Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]")   -> "0123456789"
        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be:
     - a single character
     - an escaped character with a leading backslash (such as C{\-} or C{\]})
     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 
         (C{\0x##} is also supported for backwards compatibility) 
     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
    cSs<t|t�s|Sd�dd�tt|d�t|d�d�D��S)Nr�css|]}t|�VqdSr�)r�r�ryryrzr��sz+srange.<locals>.<lambda>.<locals>.<genexpr>rr�)r}r$r�r��ord)�pryryrzr{�r|zsrange.<locals>.<lambda>r�c3s|]}�|�VqdSr�ry)r��part�Z	_expandedryrzr��szsrange.<locals>.<genexpr>N)r��_reBracketExprr�r�rprdryr�rzra�s
"cs�fdd�}|S)zt
    Helper method for defining parse actions that require matching at a specific
    column in the input text.
    cs"t||��krt||d���dS)Nzmatched token not at column %dr�)rNZlocnrVr�ryrz�	verifyCol�sz!matchOnlyAtCol.<locals>.verifyColry)r�r�ryr�rzrO�scs�fdd�S)a�
    Helper method for common parse actions that simply return a literal value.  Especially
    useful when used with C{L{transformString<ParserElement.transformString>}()}.

    Example::
        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
        term = na | num
        
        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
    cs�gSr�ryrZ�ZreplStrryrzr{�r|zreplaceWith.<locals>.<lambda>ryr�ryr�rzr^�scCs|ddd�S)a
    Helper parse action for removing quotation marks from parsed quoted strings.

    Example::
        # by default, quotation marks are included in parsed results
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]

        # use removeQuotes to strip quotation marks from parsed results
        quotedString.setParseAction(removeQuotes)
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
    rr�rtryrZryryrzr\�scsN��fdd�}zt�dt�d�j�}Wntk
rBt��}YnX||_|S)aG
    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
    args are passed, they are forwarded to the given function as additional arguments after
    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
    parsed data to an integer using base 16.

    Example (compare the last example to the one in L{ParserElement.transformString})::
        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
        hex_ints.runTests('''
            00 11 22 aa FF 0a 0d 1a
            ''')
        
        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
        OneOrMore(upperword).runTests('''
            my kingdom for a horse
            ''')

        wd = Word(alphas).setParseAction(tokenMap(str.title))
        OneOrMore(wd).setParseAction(' '.join).runTests('''
            now is the winter of our discontent made glorious summer by this sun of york
            ''')
    prints::
        00 11 22 aa FF 0a 0d 1a
        [0, 17, 34, 170, 255, 10, 13, 26]

        my kingdom for a horse
        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']

        now is the winter of our discontent made glorious summer by this sun of york
        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
    cs��fdd�|D�S)Ncsg|]}�|f����qSryry)r�Ztokn�r�r\ryrzr��sz(tokenMap.<locals>.pa.<locals>.<listcomp>ryrZr�ryrzr��sztokenMap.<locals>.par�rm)ror�rpr~)r\r�r�rqryr�rzro�s 
�cCst|���Sr��r�r@rwryryrzr{r|cCst|���Sr��r��lowerrwryryrzr{r|c	Cs�t|t�r|}t||d�}n|j}tttd�}|r�t���	t
�}td�|d�tt
t|td�|���tddgd��d	��	d
d��td�}n�d
�dd�tD��}t���	t
�t|�B}td�|d�tt
t|�	t�ttd�|����tddgd��d	��	dd��td�}ttd�|d�}|�dd
�|�dd��������d|�}|�dd
�|�dd��������d|�}||_||_||fS)zRInternal helper to construct opening and closing tag expressions, given a tag namerEz_-:r5�tag�=�/F�rrEcSs|ddkS�Nrr�ryrZryryrzr{r|z_makeTags.<locals>.<lambda>r6r�css|]}|dkr|VqdS)r6Nryr�ryryrzr�sz_makeTags.<locals>.<genexpr>cSs|ddkSr�ryrZryryrzr{r|r7rJ�:r(z<%s>r`z</%s>)r}r�rr�r1r6r5r@r�r�r\r-r
r4rrr�r�rXr[rDr�_Lr��titler�r�r�)�tagStrZxmlZresnameZtagAttrNameZtagAttrValueZopenTagZprintablesLessRAbrackZcloseTagryryrz�	_makeTagss>
�������..rcCs
t|d�S)a 
    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.

    Example::
        text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
        a,a_end = makeHTMLTags("A")
        link_expr = a + SkipTo(a_end)("link_text") + a_end
        
        for link in link_expr.searchString(text):
            # attributes in the <A> tag (like "href" shown here) are also accessible as named results
            print(link.link_text, '->', link.href)
    prints::
        pyparsing -> http://pyparsing.wikispaces.com
    F�r�r�ryryrzrM(scCs
t|d�S)z�
    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
    tags only in the given upper/lower case.

    Example: similar to L{makeHTMLTags}
    TrrryryrzrN;scs8|r|dd��n|���dd��D���fdd�}|S)a<
    Helper to create a validating parse action to be used with start tags created
    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
    with a required attribute value, to avoid false matches on common tags such as
    C{<TD>} or C{<DIV>}.

    Call C{withAttribute} with a series of attribute names and values. Specify the list
    of filter attributes names and values as:
     - keyword arguments, as in C{(align="right")}, or
     - as an explicit dict with C{**} operator, when an attribute name is also a Python
          reserved word, as in C{**{"class":"Customer", "align":"right"}}
     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
    For attribute names with a namespace prefix, you must use the second form.  Attribute
    names are matched insensitive to upper/lower case.
       
    If just testing for C{class} (with or without a namespace), use C{L{withClass}}.

    To verify that the attribute exists, but without specifying a value, pass
    C{withAttribute.ANY_VALUE} as the value.

    Example::
        html = '''
            <div>
            Some text
            <div type="grid">1 4 0 1 0</div>
            <div type="graph">1,3 2,3 1,1</div>
            <div>this has no type</div>
            </div>
                
        '''
        div,div_end = makeHTMLTags("div")

        # only match div tag having a type attribute with value "grid"
        div_grid = div().setParseAction(withAttribute(type="grid"))
        grid_expr = div_grid + SkipTo(div | div_end)("body")
        for grid_header in grid_expr.searchString(html):
            print(grid_header.body)
        
        # construct a match with any div tag having a type attribute, regardless of the value
        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
        div_expr = div_any_type + SkipTo(div | div_end)("body")
        for div_header in div_expr.searchString(html):
            print(div_header.body)
    prints::
        1 4 0 1 0

        1 4 0 1 0
        1,3 2,3 1,1
    NcSsg|]\}}||f�qSryryr/ryryrzr�zsz!withAttribute.<locals>.<listcomp>csZ�D]P\}}||kr$t||d|��|tjkr|||krt||d||||f��qdS)Nzno matching attribute z+attribute '%s' has value '%s', must be '%s')r!rg�	ANY_VALUE)r�r[r�ZattrNameZ	attrValue�Zattrsryrzr�{s�zwithAttribute.<locals>.pa)r�)r�ZattrDictr�ryrrzrgDs2cCs|rd|nd}tf||i�S)a�
    Simplified version of C{L{withAttribute}} when matching on a div class - made
    difficult because C{class} is a reserved word in Python.

    Example::
        html = '''
            <div>
            Some text
            <div class="grid">1 4 0 1 0</div>
            <div class="graph">1,3 2,3 1,1</div>
            <div>this &lt;div&gt; has no class</div>
            </div>
                
        '''
        div,div_end = makeHTMLTags("div")
        div_grid = div().setParseAction(withClass("grid"))
        
        grid_expr = div_grid + SkipTo(div | div_end)("body")
        for grid_header in grid_expr.searchString(html):
            print(grid_header.body)
        
        div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
        div_expr = div_any_type + SkipTo(div | div_end)("body")
        for div_header in div_expr.searchString(html):
            print(div_header.body)
    prints::
        1 4 0 1 0

        1 4 0 1 0
        1,3 2,3 1,1
    z%s:class�class)rg)Z	classname�	namespaceZ	classattrryryrzrm�s �(rpcCs�t�}||||B}t|�D�]l\}}|ddd�\}}	}
}|	dkrPd|nd|}|	dkr�|dkstt|�dkr|td��|\}
}t��|�}|
tjk�r^|	d	kr�t||�t|t	|��}n�|	dk�r|dk	r�t|||�t|t	||��}nt||�t|t	|��}nD|	dk�rTt||
|||�t||
|||�}ntd
��n�|
tj
k�rB|	d	k�r�t|t��s�t|�}t|j
|�t||�}n�|	dk�r�|dk	�r�t|||�t|t	||��}nt||�t|t	|��}nD|	dk�r8t||
|||�t||
|||�}ntd
��ntd��|�rvt|ttf��rl|j|�n
|�|�||�|�|BK}|}q||K}|S)aD

    Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy.  Operators may be unary or
    binary, left- or right-associative.  Parse actions can also be attached
    to operator expressions. The generated parser will also recognize the use 
    of parentheses to override operator precedences (see example below).
    
    Note: if you define a deep operator list, you may see performance issues
    when using infixNotation. See L{ParserElement.enablePackrat} for a
    mechanism to potentially improve your parser performance.

    Parameters:
     - baseExpr - expression representing the most basic element of the nested expression
     - opList - list of tuples, one for each operator precedence level in the
      expression grammar; each tuple is of the form
      (opExpr, numTerms, rightLeftAssoc, parseAction), where:
       - opExpr is the pyparsing expression for the operator;
          may also be a string, which will be converted to a Literal;
          if numTerms is 3, opExpr is a tuple of two expressions, for the
          two operators separating the 3 terms
       - numTerms is the number of terms for this operator (must
          be 1, 2, or 3)
       - rightLeftAssoc is the indicator whether the operator is
          right or left associative, using the pyparsing-defined
          constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
       - parseAction is the parse action to be associated with
          expressions matching this operator expression (the
          parse action tuple member may be omitted); if the parse action
          is passed a tuple or list of functions, this is equivalent to
          calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
     - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
     - rpar - expression for matching right-parentheses (default=C{Suppress(')')})

    Example::
        # simple example of four-function arithmetic with ints and variable names
        integer = pyparsing_common.signed_integer
        varname = pyparsing_common.identifier 
        
        arith_expr = infixNotation(integer | varname,
            [
            ('-', 1, opAssoc.RIGHT),
            (oneOf('* /'), 2, opAssoc.LEFT),
            (oneOf('+ -'), 2, opAssoc.LEFT),
            ])
        
        arith_expr.runTests('''
            5+3*6
            (5+3)*6
            -2--11
            ''', fullDump=False)
    prints::
        5+3*6
        [[5, '+', [3, '*', 6]]]

        (5+3)*6
        [[[5, '+', 3], '*', 6]]

        -2--11
        [[['-', 2], '-', ['-', 11]]]
    r�Nrbrqz%s termz	%s%s termrsz@if numterms=3, opExpr must be a tuple or list of two expressionsr�z6operator must be unary (1), binary (2), or ternary (3)z2operator must indicate right or left associativity)rr�r�r�r�rV�LEFTrrr�RIGHTr}rrSr�r�r�)ZbaseExprZopListZlparZrparr�ZlastExprr�ZoperDefZopExprZarityZrightLeftAssocr�ZtermNameZopExpr1ZopExpr2ZthisExprr�ryryrzrk�sZ=
&
�



&
�

z4"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*�"z string enclosed in double quotesz4'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*�'z string enclosed in single quotesz*quotedString using single or double quotes�uzunicode string literalcCs�||krtd��|dk�r*t|t��r"t|t��r"t|�dkr�t|�dkr�|dk	r�tt|t||tjdd����	dd��}n$t
��t||tj��	dd��}nx|dk	r�tt|t|�t|�ttjdd����	dd��}n4ttt|�t|�ttjdd����	d	d��}ntd
��t
�}|dk	�rd|tt|�t||B|B�t|��K}n$|tt|�t||B�t|��K}|�d||f�|S)a~	
    Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).

    Parameters:
     - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
     - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
     - content - expression for items within the nested lists (default=C{None})
     - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})

    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.

    Use the C{ignoreExpr} argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression.  Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
    The default is L{quotedString}, but if no expressions are to be ignored,
    then pass C{None} for this argument.

    Example::
        data_type = oneOf("void int short long char float double")
        decl_data_type = Combine(data_type + Optional(Word('*')))
        ident = Word(alphas+'_', alphanums+'_')
        number = pyparsing_common.number
        arg = Group(decl_data_type + ident)
        LPAR,RPAR = map(Suppress, "()")

        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))

        c_function = (decl_data_type("type") 
                      + ident("name")
                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR 
                      + code_body("body"))
        c_function.ignore(cStyleComment)
        
        source_code = '''
            int is_odd(int x) { 
                return (x%2); 
            }
                
            int dec_to_hex(char hchar) { 
                if (hchar >= '0' && hchar <= '9') { 
                    return (ord(hchar)-ord('0')); 
                } else { 
                    return (10+ord(hchar)-ord('A'));
                } 
            }
        '''
        for func in c_function.searchString(source_code):
            print("%(name)s (%(type)s) args: %(args)s" % func)

    prints::
        is_odd (int) args: [['int', 'x']]
        dec_to_hex (int) args: [['char', 'hchar']]
    z.opening and closing strings cannot be the sameNr�r�cSs|d��Sr��r�rwryryrzr{gr|znestedExpr.<locals>.<lambda>cSs|d��Sr�r
rwryryrzr{jr|cSs|d��Sr�r
rwryryrzr{pr|cSs|d��Sr�r
rwryryrzr{tr|zOopening and closing arguments must be strings if no content expression is givenznested %s%s expression)r�r}r�r�rrrr&rsr�rEr�rrrr-r4r�)ZopenerZcloserZcontentr�r�ryryrzrR%sH:
���������
*$cs��fdd�}�fdd�}�fdd�}tt��d����}t�t��|��d�}t��|��d	�}t��|��d
�}	|r�tt|�|t|t|�t|��|	�}
n$tt|�t|t|�t|���}
|�	t
t��|
�d�S)a
	
    Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.

    Parameters:
     - blockStatementExpr - expression defining syntax of statement that
            is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
            (multiple indentedBlock expressions within a single grammar
            should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond the
            the current level; set to False for block of left-most statements
            (default=C{True})

    A valid block must contain at least one C{blockStatement}.

    Example::
        data = '''
        def A(z):
          A1
          B = 100
          G = A2
          A2
          A3
        B
        def BB(a,b,c):
          BB1
          def BBA():
            bba1
            bba2
            bba3
        C
        D
        def spam(x,y):
             def eggs(z):
                 pass
        '''


        indentStack = [1]
        stmt = Forward()

        identifier = Word(alphas, alphanums)
        funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
        func_body = indentedBlock(stmt, indentStack)
        funcDef = Group( funcDecl + func_body )

        rvalue = Forward()
        funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
        rvalue << (funcCall | identifier | Word(nums))
        assignment = Group(identifier + "=" + rvalue)
        stmt << ( funcDef | assignment | identifier )

        module_body = OneOrMore(stmt)

        parseTree = module_body.parseString(data)
        parseTree.pprint()
    prints::
        [['def',
          'A',
          ['(', 'z', ')'],
          ':',
          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
         'B',
         ['def',
          'BB',
          ['(', 'a', 'b', 'c', ')'],
          ':',
          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
         'C',
         'D',
         ['def',
          'spam',
          ['(', 'x', 'y', ')'],
          ':',
          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] 

    [compiled bytecode omitted; the pyparsing_common class docstring follows]

    Here are some common low-level expressions that may be useful in jump-starting parser development:
     - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
     - common L{programming identifiers<identifier>}
     - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
     - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
     - L{UUID<uuid>}
     - L{comma-separated list<comma_separated_list>}
    Parse actions:
     - C{L{convertToInteger}}
     - C{L{convertToFloat}}
     - C{L{convertToDate}}
     - C{L{convertToDatetime}}
     - C{L{stripHTMLTags}}
     - C{L{upcaseTokens}}
     - C{L{downcaseTokens}}

    Example::
        pyparsing_common.number.runTests('''
            # any int or real number, returned as the appropriate type
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.fnumber.runTests('''
            # any int or real number, returned as float
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.hex_integer.runTests('''
            # hex numbers
            100
            FF
            ''')

        pyparsing_common.fraction.runTests('''
            # fractions
            1/2
            -3/4
            ''')

        pyparsing_common.mixed_integer.runTests('''
            # mixed fractions
            1
            1/2
            -3/4
            1-3/4
            ''')

        import uuid
        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
        pyparsing_common.uuid.runTests('''
            # uuid
            12345678-1234-5678-1234-567812345678
            ''')
    prints::
        # any int or real number, returned as the appropriate type
        100
        [100]

        -100
        [-100]

        +100
        [100]

        3.14159
        [3.14159]

        6.02e23
        [6.02e+23]

        1e-12
        [1e-12]

        # any int or real number, returned as float
        100
        [100.0]

        -100
        [-100.0]

        +100
        [100.0]

        3.14159
        [3.14159]

        6.02e23
        [6.02e+23]

        1e-12
        [1e-12]

        # hex numbers
        100
        [256]

        FF
        [255]

        # fractions
        1/2
        [0.5]

        -3/4
        [-0.75]

        # mixed fractions
        1
        [1]

        1/2
        [0.5]

        -3/4
        [-0.75]

        1-3/4
        [1.75]

        # uuid
        12345678-1234-5678-1234-567812345678
        [UUID('12345678-1234-5678-1234-567812345678')]

    [compiled bytecode omitted; the convertToDate docstring follows]

        Helper to create a parse action for converting parsed date string to Python datetime.date

        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})

        Example::
            date_expr = pyparsing_common.iso8601_date.copy()
            date_expr.setParseAction(pyparsing_common.convertToDate())
            print(date_expr.parseString("1999-12-31"))
        prints::
            [datetime.date(1999, 12, 31)]

    [compiled bytecode omitted; the convertToDatetime docstring follows]

        Helper to create a parse action for converting parsed datetime string to Python datetime.datetime

        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})

        Example::
            dt_expr = pyparsing_common.iso8601_datetime.copy()
            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
            print(dt_expr.parseString("1999-12-31T23:59:59.999"))
        prints::
            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]

    [compiled bytecode omitted; the stripHTMLTags docstring follows]

        Parse action to remove HTML tags from web page HTML source

        Example::
            # strip HTML links from normal text 
            text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
            td,td_end = makeHTMLTags("TD")
            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
            
            print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'

    [compiled bytecode omitted; sample input strings from the module self-test follow]

        # '*' as column list and dotted table name
        select * from SYS.XYZZY

        # caseless match on "SELECT", and casts back to "select"
        SELECT * from XYZZY, ABC

        # list of column names, and mixed case SELECT keyword
        Select AA,BB,CC from Sys.dual

        # multiple tables
        Select A, B, C from Sys.dual, Table2

        # invalid SELECT keyword - should fail
        Xelect A, B, C from Sys.dual

        # incomplete command - should fail
        Select

        # invalid column name - should fail
        Select ^^^ frox Sys.dual

        100
        -100
        +100
        3.14159
        6.02e23
        1e-12

        100
        FF

        12345678-1234-5678-1234-567812345678

[compiled bytecode omitted; end of the pyparsing bytecode entry]

PK: _vendor/__pycache__/__init__.cpython-38.pyc  [compiled bytecode omitted]

PK: _vendor/appdirs.py
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor

"""Utilities for determining application-specific dirs.

See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
#   http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html

__version_info__ = (1, 4, 3)
__version__ = '.'.join(map(str, __version_info__))


import sys
import os

PY3 = sys.version_info[0] == 3

if PY3:
    unicode = str

if sys.platform.startswith('java'):
    import platform
    os_name = platform.java_ver()[3][0]
    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
        system = 'win32'
    elif os_name.startswith('Mac'): # "Mac OS X", etc.
        system = 'darwin'
    else: # "Linux", "SunOS", "FreeBSD", etc.
        # Setting this to "linux2" is not ideal, but only Windows or Mac
        # are actually checked for and the rest of the module expects
        # *sys.platform* style strings.
        system = 'linux2'
else:
    system = sys.platform



def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user data directories are:
        Mac OS X:               ~/Library/Application Support/<AppName>
        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>

    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
    That means, by default "~/.local/share/<AppName>".
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(_get_win_folder(const))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Application Support/')
        if appname:
            path = os.path.join(path, appname)
    else:
        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
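
# A minimal sketch of how the arguments above combine on a Linux system with
# no XDG_DATA_HOME override ("MyApp", "MyCompany", and "1.0" are hypothetical
# values; actual results depend on platform and environment):
def _sketch_user_data_dir():
    # Expected to resolve to the expansion of "~/.local/share/MyApp".
    print(user_data_dir("MyApp", "MyCompany"))
    # "version" appends one more path element: ".../MyApp/1.0".
    print(user_data_dir("MyApp", "MyCompany", version="1.0"))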


def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return full path to the user-shared data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "multipath" is an optional parameter only applicable to *nix
            which indicates that the entire list of data dirs should be
            returned. By default, the first item from XDG_DATA_DIRS is
            returned, or '/usr/local/share/<AppName>',
            if XDG_DATA_DIRS is not set

    Typical site data directories are:
        Mac OS X:   /Library/Application Support/<AppName>
        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.

    For Unix, this is using the $XDG_DATA_DIRS[0] default.

    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
    elif system == 'darwin':
        path = os.path.expanduser('/Library/Application Support')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG default for $XDG_DATA_DIRS
        # only first, if multipath is False
        path = os.getenv('XDG_DATA_DIRS',
                         os.pathsep.join(['/usr/local/share', '/usr/share']))
        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
        if appname:
            if version:
                appname = os.path.join(appname, version)
            pathlist = [os.sep.join([x, appname]) for x in pathlist]

        if multipath:
            path = os.pathsep.join(pathlist)
        else:
            path = pathlist[0]
        return path

    if appname and version:
        path = os.path.join(path, version)
    return path
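
# A minimal sketch of the "multipath" switch described above, on a Linux
# system where XDG_DATA_DIRS is unset (hypothetical app name; real output
# depends on the environment):
def _sketch_site_data_dir():
    # Default: only the first candidate, e.g. "/usr/local/share/MyApp".
    print(site_data_dir("MyApp"))
    # multipath=True: every candidate joined with os.pathsep, e.g.
    # "/usr/local/share/MyApp:/usr/share/MyApp".
    print(site_data_dir("MyApp", multipath=True))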


def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific config dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user config directories are:
        Mac OS X:               same as user_data_dir
        Unix:                   ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
        Win *:                  same as user_data_dir

    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
    """
    if system in ["win32", "darwin"]:
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path


def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return full path to the user-shared data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "multipath" is an optional parameter only applicable to *nix
            which indicates that the entire list of config dirs should be
            returned. By default, the first item from XDG_CONFIG_DIRS is
            returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set

    Typical site config directories are:
        Mac OS X:   same as site_data_dir
        Unix:       /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
                    $XDG_CONFIG_DIRS
        Win *:      same as site_data_dir
        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)

    For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False

    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
    """
    if system in ["win32", "darwin"]:
        path = site_data_dir(appname, appauthor)
        if appname and version:
            path = os.path.join(path, version)
    else:
        # XDG default for $XDG_CONFIG_DIRS
        # only first, if multipath is False
        path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
        if appname:
            if version:
                appname = os.path.join(appname, version)
            pathlist = [os.sep.join([x, appname]) for x in pathlist]

        if multipath:
            path = os.pathsep.join(pathlist)
        else:
            path = pathlist[0]
    return path


def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific cache dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "opinion" (boolean) can be False to disable the appending of
            "Cache" to the base app data dir for Windows. See
            discussion below.

    Typical user cache directories are:
        Mac OS X:   ~/Library/Caches/<AppName>
        Unix:       ~/.cache/<AppName> (XDG default)
        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache

    On Windows the only suggestion in the MSDN docs is that local settings go in
    the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
    app data dir (the default returned by `user_data_dir` above). Apps typically
    put cache data somewhere *under* the given dir here. Some examples:
        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
        ...\Acme\SuperApp\Cache\1.0
    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
    This can be disabled with the `opinion=False` option.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
            if opinion:
                path = os.path.join(path, "Cache")
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Caches')
        if appname:
            path = os.path.join(path, appname)
    else:
        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
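
# A minimal sketch of the "opinion" flag discussed above as it plays out on
# Windows (hypothetical names; on Unix this function ignores the flag and
# returns the XDG cache directory instead):
def _sketch_user_cache_dir():
    # With the default opinion=True on Windows, "Cache" is appended, e.g.
    # C:\Users\<user>\AppData\Local\MyCompany\MyApp\Cache
    print(user_cache_dir("MyApp", "MyCompany"))
    # opinion=False keeps the bare per-app local appdata directory.
    print(user_cache_dir("MyApp", "MyCompany", opinion=False))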


def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific state dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user state directories are:
        Mac OS X:  same as user_data_dir
        Unix:      ~/.local/state/<AppName>   # or in $XDG_STATE_HOME, if defined
        Win *:     same as user_data_dir

    For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
    to extend the XDG spec and support $XDG_STATE_HOME.

    That means, by default "~/.local/state/<AppName>".
    """
    if system in ["win32", "darwin"]:
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path


def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific log dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "opinion" (boolean) can be False to disable the appending of
            "Logs" to the base app data dir for Windows, and "log" to the
            base cache dir for Unix. See discussion below.

    Typical user log directories are:
        Mac OS X:   ~/Library/Logs/<AppName>
        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs

    On Windows the only suggestion in the MSDN docs is that local settings
    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
    examples of what some windows apps use for a logs dir.)

    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
    value for Windows and appends "log" to the user cache dir for Unix.
    This can be disabled with the `opinion=False` option.
    """
    if system == "darwin":
        path = os.path.join(
            os.path.expanduser('~/Library/Logs'),
            appname)
    elif system == "win32":
        path = user_data_dir(appname, appauthor, version)
        version = False
        if opinion:
            path = os.path.join(path, "Logs")
    else:
        path = user_cache_dir(appname, appauthor, version)
        version = False
        if opinion:
            path = os.path.join(path, "log")
    if appname and version:
        path = os.path.join(path, version)
    return path
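
# A minimal sketch of how user_log_dir layers on top of the cache/data dirs
# above (hypothetical names; "version" is applied by the delegated call before
# "log"/"Logs" is appended, because the function resets version afterwards):
def _sketch_user_log_dir():
    # On Linux this is expected to look like "~/.cache/MyApp/1.0/log".
    print(user_log_dir("MyApp", "MyCompany", version="1.0"))
    # opinion=False skips the trailing "log"/"Logs" component.
    print(user_log_dir("MyApp", "MyCompany", version="1.0", opinion=False))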


class AppDirs(object):
    """Convenience wrapper for getting application dirs."""
    def __init__(self, appname=None, appauthor=None, version=None,
            roaming=False, multipath=False):
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        self.roaming = roaming
        self.multipath = multipath

    @property
    def user_data_dir(self):
        return user_data_dir(self.appname, self.appauthor,
                             version=self.version, roaming=self.roaming)

    @property
    def site_data_dir(self):
        return site_data_dir(self.appname, self.appauthor,
                             version=self.version, multipath=self.multipath)

    @property
    def user_config_dir(self):
        return user_config_dir(self.appname, self.appauthor,
                               version=self.version, roaming=self.roaming)

    @property
    def site_config_dir(self):
        return site_config_dir(self.appname, self.appauthor,
                             version=self.version, multipath=self.multipath)

    @property
    def user_cache_dir(self):
        return user_cache_dir(self.appname, self.appauthor,
                              version=self.version)

    @property
    def user_state_dir(self):
        return user_state_dir(self.appname, self.appauthor,
                              version=self.version)

    @property
    def user_log_dir(self):
        return user_log_dir(self.appname, self.appauthor,
                            version=self.version)
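
# A minimal sketch of the convenience wrapper above (hypothetical names; each
# property simply delegates to the matching module-level function with the
# stored constructor arguments):
def _sketch_appdirs_wrapper():
    dirs = AppDirs("MyApp", "MyCompany", version="1.0")
    # Equivalent to user_data_dir("MyApp", "MyCompany", version="1.0", roaming=False)
    print(dirs.user_data_dir)
    print(dirs.user_cache_dir)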


#---- internal support stuff

def _get_win_folder_from_registry(csidl_name):
    """This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    if PY3:
      import winreg as _winreg
    else:
      import _winreg

    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]

    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
    return dir


def _get_win_folder_with_pywin32(csidl_name):
    from win32com.shell import shellcon, shell
    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        dir = unicode(dir)

        # Downgrade to short path name if have highbit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        has_high_char = False
        for c in dir:
            if ord(c) > 255:
                has_high_char = True
                break
        if has_high_char:
            try:
                import win32api
                dir = win32api.GetShortPathName(dir)
            except ImportError:
                pass
    except UnicodeError:
        pass
    return dir


def _get_win_folder_with_ctypes(csidl_name):
    import ctypes

    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in buf:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf2 = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2

    return buf.value

def _get_win_folder_with_jna(csidl_name):
    import array
    from com.sun import jna
    from com.sun.jna.platform import win32

    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros('c', buf_size)
    shell = win32.Shell32.INSTANCE
    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
    dir = jna.Native.toString(buf.tostring()).rstrip("\0")

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in dir:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf = array.zeros('c', buf_size)
        kernel = win32.Kernel32.INSTANCE
        if kernel.GetShortPathName(dir, buf, buf_size):
            dir = jna.Native.toString(buf.tostring()).rstrip("\0")

    return dir

if system == "win32":
    try:
        import win32com.shell
        _get_win_folder = _get_win_folder_with_pywin32
    except ImportError:
        try:
            from ctypes import windll
            _get_win_folder = _get_win_folder_with_ctypes
        except ImportError:
            try:
                import com.sun.jna
                _get_win_folder = _get_win_folder_with_jna
            except ImportError:
                _get_win_folder = _get_win_folder_from_registry


#---- self test code

if __name__ == "__main__":
    appname = "MyApp"
    appauthor = "MyCompany"

    props = ("user_data_dir",
             "user_config_dir",
             "user_cache_dir",
             "user_state_dir",
             "user_log_dir",
             "site_data_dir",
             "site_config_dir")

    print("-- app dirs %s --" % __version__)

    print("-- app dirs (with optional 'version')")
    dirs = AppDirs(appname, appauthor, version="1.0")
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))

    print("\n-- app dirs (without optional 'version')")
    dirs = AppDirs(appname, appauthor)
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))

    print("\n-- app dirs (without optional 'appauthor')")
    dirs = AppDirs(appname)
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))

    print("\n-- app dirs (with disabled 'appauthor')")
    dirs = AppDirs(appname, appauthor=False)
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))

PK: _vendor/pyparsing.py
# module pyparsing.py
#
# Copyright (c) 2003-2018  Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================

The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.

Here is a program to parse "Hello, World!" (or any greeting of the form 
C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements 
(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::

    from pyparsing import Word, alphas

    # define grammar of a greeting
    greet = Word(alphas) + "," + Word(alphas) + "!"

    hello = "Hello, World!"
    print (hello, "->", greet.parseString(hello))

The program outputs the following::

    Hello, World! -> ['Hello', ',', 'World', '!']

The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.

The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
object with named attributes.

The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
 - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)
 - quoted strings
 - embedded comments


Getting Started -
-----------------
Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
 - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
 - construct character word-group expressions using the L{Word} class
 - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
 - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
 - associate names with your parsed results using L{ParserElement.setResultsName}
 - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
 - find more useful common expressions in the L{pyparsing_common} namespace class
"""

__version__ = "2.2.1"
__versionTime__ = "18 Sep 2018 00:49 UTC"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"

import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime

try:
    from _thread import RLock
except ImportError:
    from threading import RLock

try:
    # Python 3
    from collections.abc import Iterable
    from collections.abc import MutableMapping
except ImportError:
    # Python 2.7
    from collections import Iterable
    from collections import MutableMapping

try:
    from collections import OrderedDict as _OrderedDict
except ImportError:
    try:
        from ordereddict import OrderedDict as _OrderedDict
    except ImportError:
        _OrderedDict = None

#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )

__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
'CloseMatch', 'tokenMap', 'pyparsing_common',
]

system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
    _MAX_INT = sys.maxsize
    basestring = str
    unichr = chr
    _ustr = str

    # build list of single arg builtins, that can be used as parse actions
    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]

else:
    _MAX_INT = sys.maxint
    range = xrange

    def _ustr(obj):
        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
           then < returns the unicode object | encodes it with the default encoding | ... >.
        """
        if isinstance(obj,unicode):
            return obj

        try:
            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
            # it won't break any existing code.
            return str(obj)

        except UnicodeEncodeError:
            # Else encode it
            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
            xmlcharref = Regex(r'&#\d+;')
            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
            return xmlcharref.transformString(ret)

    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
    singleArgBuiltins = []
    import __builtin__
    for fname in "sum len sorted reversed list tuple set any all min max".split():
        try:
            singleArgBuiltins.append(getattr(__builtin__,fname))
        except AttributeError:
            continue
            
_generatorType = type((y for y in range(1)))
 
def _xml_escape(data):
    """Escape &, <, >, ", ', etc. in a string of data."""

    # ampersand must be replaced first
    from_symbols = '&><"\''
    to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
    for from_,to_ in zip(from_symbols, to_symbols):
        data = data.replace(from_, to_)
    return data
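
# A minimal sketch of the escaping above (ampersand is replaced first so the
# entities produced by the other replacements are not escaped again):
def _sketch_xml_escape():
    assert _xml_escape('a < b & "c"') == 'a &lt; b &amp; &quot;c&quot;'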

class _Constants(object):
    pass

alphas     = string.ascii_uppercase + string.ascii_lowercase
nums       = "0123456789"
hexnums    = nums + "ABCDEFabcdef"
alphanums  = alphas + nums
_bslash    = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)

class ParseBaseException(Exception):
    """base exception class for all parsing runtime exceptions"""
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
        self.loc = loc
        if msg is None:
            self.msg = pstr
            self.pstr = ""
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem
        self.args = (pstr, loc, msg)

    @classmethod
    def _from_exception(cls, pe):
        """
        internal factory method to simplify creating one type of ParseException 
        from another - avoids having __init__ signature conflicts among subclasses
        """
        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)

    def __getattr__( self, aname ):
        """supported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        """
        if( aname == "lineno" ):
            return lineno( self.loc, self.pstr )
        elif( aname in ("col", "column") ):
            return col( self.loc, self.pstr )
        elif( aname == "line" ):
            return line( self.loc, self.pstr )
        else:
            raise AttributeError(aname)

    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % \
                ( self.msg, self.loc, self.lineno, self.column )
    def __repr__( self ):
        return _ustr(self)
    def markInputline( self, markerString = ">!<" ):
        """Extracts the exception line from the input string, and marks
           the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            line_str = "".join((line_str[:line_column],
                                markerString, line_str[line_column:]))
        return line_str.strip()
    def __dir__(self):
        return "lineno col line".split() + dir(type(self))

class ParseException(ParseBaseException):
    """
    Exception thrown when a parse expression doesn't match the input string;
    supported attributes by name are:
     - lineno - returns the line number of the exception text
     - col - returns the column number of the exception text
     - line - returns the line containing the exception text
        
    Example::
        try:
            Word(nums).setName("integer").parseString("ABC")
        except ParseException as pe:
            print(pe)
            print("column: {}".format(pe.col))
            
    prints::
       Expected integer (at char 0), (line:1, col:1)
        column: 1
    """
    pass

class ParseFatalException(ParseBaseException):
    """user-throwable exception thrown when inconsistent parse content
       is found; stops all parsing immediately"""
    pass

class ParseSyntaxException(ParseFatalException):
    """just like L{ParseFatalException}, but thrown internally when an
       L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop 
       immediately because an unbacktrackable syntax error has been found"""
    pass

#~ class ReparseException(ParseBaseException):
    #~ """Experimental class - parse actions can raise this exception to cause
       #~ pyparsing to reparse the input string:
        #~ - with a modified input string, and/or
        #~ - with a modified start location
       #~ Set the values of the ReparseException in the constructor, and raise the
       #~ exception in a parse action to cause pyparsing to use the new string/location.
       #~ Setting the values as None causes no change to be made.
       #~ """
    #~ def __init_( self, newstring, restartLoc ):
        #~ self.newParseText = newstring
        #~ self.reparseLoc = restartLoc

class RecursiveGrammarException(Exception):
    """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
    def __init__( self, parseElementList ):
        self.parseElementTrace = parseElementList

    def __str__( self ):
        return "RecursiveGrammarException: %s" % self.parseElementTrace

class _ParseResultsWithOffset(object):
    def __init__(self,p1,p2):
        self.tup = (p1,p2)
    def __getitem__(self,i):
        return self.tup[i]
    def __repr__(self):
        return repr(self.tup[0])
    def setOffset(self,i):
        self.tup = (self.tup[0],i)

class ParseResults(object):
    """
    Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (C{len(results)})
       - by list index (C{results[0], results[1]}, etc.)
       - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})

    Example::
        integer = Word(nums)
        date_str = (integer.setResultsName("year") + '/' 
                        + integer.setResultsName("month") + '/' 
                        + integer.setResultsName("day"))
        # equivalent form:
        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

        # parseString returns a ParseResults object
        result = date_str.parseString("1999/12/31")

        def test(s, fn=repr):
            print("%s -> %s" % (s, fn(eval(s))))
        test("list(result)")
        test("result[0]")
        test("result['month']")
        test("result.day")
        test("'month' in result")
        test("'minutes' in result")
        test("result.dump()", str)
    prints::
        list(result) -> ['1999', '/', '12', '/', '31']
        result[0] -> '1999'
        result['month'] -> '12'
        result.day -> '31'
        'month' in result -> True
        'minutes' in result -> False
        result.dump() -> ['1999', '/', '12', '/', '31']
        - day: 31
        - month: 12
        - year: 1999
    """
    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            self.__asList = asList
            self.__modal = modal
            if toklist is None:
                toklist = []
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            elif isinstance(toklist, _generatorType):
                self.__toklist = list(toklist)
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()

        if name is not None and name:
            if not modal:
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError,IndexError):
                        self[name] = toklist

    def __getitem__( self, i ):
        if isinstance( i, (int,slice) ):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                return self.__tokdict[i][-1][0]
            else:
                return ParseResults([ v[0] for v in self.__tokdict[i] ])

    def __setitem__( self, k, v, isinstance=isinstance ):
        if isinstance(v,_ParseResultsWithOffset):
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,(int,slice)):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
            sub = v
        if isinstance(sub,ParseResults):
            sub.__parent = wkref(self)

    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]

            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
            for name,occurrences in self.__tokdict.items():
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]

    def __contains__( self, k ):
        return k in self.__tokdict

    def __len__( self ): return len( self.__toklist )
    def __bool__(self): return ( not not self.__toklist )
    __nonzero__ = __bool__
    def __iter__( self ): return iter( self.__toklist )
    def __reversed__( self ): return iter( self.__toklist[::-1] )
    def _iterkeys( self ):
        if hasattr(self.__tokdict, "iterkeys"):
            return self.__tokdict.iterkeys()
        else:
            return iter(self.__tokdict)

    def _itervalues( self ):
        return (self[k] for k in self._iterkeys())
            
    def _iteritems( self ):
        return ((k, self[k]) for k in self._iterkeys())

    if PY_3:
        keys = _iterkeys       
        """Returns an iterator of all named result keys (Python 3.x only)."""

        values = _itervalues
        """Returns an iterator of all named result values (Python 3.x only)."""

        items = _iteritems
        """Returns an iterator of all named result key-value tuples (Python 3.x only)."""

    else:
        iterkeys = _iterkeys
        """Returns an iterator of all named result keys (Python 2.x only)."""

        itervalues = _itervalues
        """Returns an iterator of all named result values (Python 2.x only)."""

        iteritems = _iteritems
        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""

        def keys( self ):
            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
            return list(self.iterkeys())

        def values( self ):
            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
            return list(self.itervalues())
                
        def items( self ):
            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
            return list(self.iteritems())

    def haskeys( self ):
        """Since keys() returns an iterator, this method is helpful in bypassing
           code that looks for the existence of any defined results names."""
        return bool(self.__tokdict)
        
    def pop( self, *args, **kwargs):
        """
        Removes and returns item at specified index (default=C{last}).
        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
        argument or an integer argument, it will use C{list} semantics
        and pop tokens from the list of parsed tokens. If passed a 
        non-integer argument (most likely a string), it will use C{dict}
        semantics and pop the corresponding value from any defined 
        results names. A second default return value argument is 
        supported, just as in C{dict.pop()}.

        Example::
            def remove_first(tokens):
                tokens.pop(0)
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']

            label = Word(alphas)
            patt = label("LABEL") + OneOrMore(Word(nums))
            print(patt.parseString("AAB 123 321").dump())

            # Use pop() in a parse action to remove named result (note that corresponding value is not
            # removed from list form of results)
            def remove_LABEL(tokens):
                tokens.pop("LABEL")
                return tokens
            patt.addParseAction(remove_LABEL)
            print(patt.parseString("AAB 123 321").dump())
        prints::
            ['AAB', '123', '321']
            - LABEL: AAB

            ['AAB', '123', '321']
        """
        if not args:
            args = [-1]
        for k,v in kwargs.items():
            if k == 'default':
                args = (args[0], v)
            else:
                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
        if (isinstance(args[0], int) or 
                        len(args) == 1 or 
                        args[0] in self):
            index = args[0]
            ret = self[index]
            del self[index]
            return ret
        else:
            defaultvalue = args[1]
            return defaultvalue

    def get(self, key, defaultValue=None):
        """
        Returns named result matching the given key, or if there is no
        such name, then returns the given C{defaultValue} or C{None} if no
        C{defaultValue} is specified.

        Similar to C{dict.get()}.
        
        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            result = date_str.parseString("1999/12/31")
            print(result.get("year")) # -> '1999'
            print(result.get("hour", "not specified")) # -> 'not specified'
            print(result.get("hour")) # -> None
        """
        if key in self:
            return self[key]
        else:
            return defaultValue

    def insert( self, index, insStr ):
        """
        Inserts new element at location index in the list of parsed tokens.
        
        Similar to C{list.insert()}.

        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']

            # use a parse action to insert the parse location in the front of the parsed results
            def insert_locn(locn, tokens):
                tokens.insert(0, locn)
            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
        """
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary
        for name,occurrences in self.__tokdict.items():
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))

    def append( self, item ):
        """
        Add single element to end of ParseResults list of elements.

        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            
            # use a parse action to compute the sum of the parsed integers, and add it to the end
            def append_sum(tokens):
                tokens.append(sum(map(int, tokens)))
            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
        """
        self.__toklist.append(item)

    def extend( self, itemseq ):
        """
        Add sequence of elements to end of ParseResults list of elements.

        Example::
            patt = OneOrMore(Word(alphas))
            
            # use a parse action to append the reverse of the matched strings, to make a palindrome
            def make_palindrome(tokens):
                tokens.extend(reversed([t[::-1] for t in tokens]))
                return ''.join(tokens)
            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
        """
        if isinstance(itemseq, ParseResults):
            self += itemseq
        else:
            self.__toklist.extend(itemseq)

    def clear( self ):
        """
        Clear all elements and results names.
        """
        del self.__toklist[:]
        self.__tokdict.clear()

    def __getattr__( self, name ):
        try:
            return self[name]
        except KeyError:
            return ""

    def __add__( self, other ):
        ret = self.copy()
        ret += other
        return ret

    def __iadd__( self, other ):
        if other.__tokdict:
            offset = len(self.__toklist)
            addoffset = lambda a: offset if a<0 else a+offset
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                                for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    v[0].__parent = wkref(self)
            
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        return self

    def __radd__(self, other):
        if isinstance(other,int) and other == 0:
            # useful for merging many ParseResults using sum() builtin
            return self.copy()
        else:
            # this may raise a TypeError - so be it
            return other + self
        
    def __repr__( self ):
        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )

    def __str__( self ):
        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'

    def _asStringList( self, sep='' ):
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance( item, ParseResults ):
                out += item._asStringList()
            else:
                out.append( _ustr(item) )
        return out

    def asList( self ):
        """
        Returns the parse results as a nested list of matching tokens, all converted to strings.

        Example::
            patt = OneOrMore(Word(alphas))
            result = patt.parseString("sldkj lsdkj sldkj")
            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
            
            # Use asList() to create an actual list
            result_list = result.asList()
            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
        """
        return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]

    def asDict( self ):
        """
        Returns the named parse results as a nested dictionary.

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            
            result = date_str.parseString('12/31/1999')
            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
            
            result_dict = result.asDict()
            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}

            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
            import json
            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
        """
        if PY_3:
            item_fn = self.items
        else:
            item_fn = self.iteritems
            
        def toItem(obj):
            if isinstance(obj, ParseResults):
                if obj.haskeys():
                    return obj.asDict()
                else:
                    return [toItem(v) for v in obj]
            else:
                return obj
                
        return dict((k,toItem(v)) for k,v in item_fn())

    def copy( self ):
        """
        Returns a new copy of a C{ParseResults} object.
        """
        ret = ParseResults( self.__toklist )
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update( self.__accumNames )
        ret.__name = self.__name
        return ret

    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """
        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
        """
        nl = "\n"
        out = []
        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
                                                            for v in vlist)
        nextLevelIndent = indent + "  "

        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""

        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name

        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"

        out += [ nl, indent, "<", selfTag, ">" ]

        for i,res in enumerate(self.__toklist):
            if isinstance(res,ParseResults):
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
                else:
                    out += [ res.asXML(None,
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                                                xmlBodyText,
                                                "</", resTag, ">" ]

        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)

    def __lookup(self,sub):
        for k,vlist in self.__tokdict.items():
            for v,loc in vlist:
                if sub is v:
                    return k
        return None

    def getName(self):
        r"""
        Returns the results name for this token expression. Useful when several 
        different expressions might match at a particular location.

        Example::
            integer = Word(nums)
            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
            house_number_expr = Suppress('#') + Word(nums, alphanums)
            user_data = (Group(house_number_expr)("house_number") 
                        | Group(ssn_expr)("ssn")
                        | Group(integer)("age"))
            user_info = OneOrMore(user_data)
            
            result = user_info.parseString("22 111-22-3333 #221B")
            for item in result:
                print(item.getName(), ':', item[0])
        prints::
            age : 22
            ssn : 111-22-3333
            house_number : 221B
        """
        if self.__name:
            return self.__name
        elif self.__parent:
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
            return next(iter(self.__tokdict.keys()))
        else:
            return None

    def dump(self, indent='', depth=0, full=True):
        """
        Diagnostic method for listing out the contents of a C{ParseResults}.
        Accepts an optional C{indent} argument so that this string can be embedded
        in a nested display of other data.

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            
            result = date_str.parseString('12/31/1999')
            print(result.dump())
        prints::
            ['12', '/', '31', '/', '1999']
            - day: 1999
            - month: 31
            - year: 12
        """
        out = []
        NL = '\n'
        out.append( indent+_ustr(self.asList()) )
        if full:
            if self.haskeys():
                items = sorted((str(k), v) for k,v in self.items())
                for k,v in items:
                    if out:
                        out.append(NL)
                    out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
                    if isinstance(v,ParseResults):
                        if v:
                            out.append( v.dump(indent,depth+1) )
                        else:
                            out.append(_ustr(v))
                    else:
                        out.append(repr(v))
            elif any(isinstance(vv,ParseResults) for vv in self):
                v = self
                for i,vv in enumerate(v):
                    if isinstance(vv,ParseResults):
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))
                    else:
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))
            
        return "".join(out)

    def pprint(self, *args, **kwargs):
        """
        Pretty-printer for parsed results as a list, using the C{pprint} module.
        Accepts additional positional or keyword args as defined for the 
        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})

        Example::
            ident = Word(alphas, alphanums)
            num = Word(nums)
            func = Forward()
            term = ident | num | Group('(' + func + ')')
            func <<= ident + Group(Optional(delimitedList(term)))
            result = func.parseString("fna a,b,(fnb c,d,200),100")
            result.pprint(width=40)
        prints::
            ['fna',
             ['a',
              'b',
              ['(', 'fnb', ['c', 'd', '200'], ')'],
              '100']]
        """
        pprint.pprint(self.asList(), *args, **kwargs)

    # add support for pickle protocol
    def __getstate__(self):
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   self.__parent is not None and self.__parent() or None,
                   self.__accumNames,
                   self.__name ) )

    def __setstate__(self,state):
        self.__toklist = state[0]
        (self.__tokdict,
         par,
         inAccumNames,
         self.__name) = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            self.__parent = wkref(par)
        else:
            self.__parent = None

    def __getnewargs__(self):
        return self.__toklist, self.__name, self.__asList, self.__modal

    def __dir__(self):
        return (dir(type(self)) + list(self.keys()))

MutableMapping.register(ParseResults)

def col (loc,strg):
    """Returns current column within a string, counting newlines as line separators.
   The first column is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    s = strg
    return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)

def lineno(loc,strg):
    """Returns current line number within a string, counting newlines as line separators.
   The first line is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    return strg.count("\n",0,loc) + 1

def line( loc, strg ):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
       """
    lastCR = strg.rfind("\n", 0, loc)
    nextCR = strg.find("\n", loc)
    if nextCR >= 0:
        return strg[lastCR+1:nextCR]
    else:
        return strg[lastCR+1:]
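
# Illustrative usage sketch (not part of the original source): col(), lineno() and
# line() above all take (loc, strg) and use 1-based line/column numbering.
#
#   s = "abc\ndef"
#   lineno(5, s)   # -> 2      (loc 5 is the 'e' on the second line)
#   col(5, s)      # -> 2      (second column of that line)
#   line(5, s)     # -> 'def'  (text of the line containing loc 5)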

def _defaultStartDebugAction( instring, loc, expr ):
    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))

def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))

def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    print ("Exception raised:" + _ustr(exc))

def nullDebugAction(*args):
    """'Do-nothing' debug action, to suppress debugging output during parsing."""
    pass

# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
    #~ if func in singleArgBuiltins:
        #~ return lambda s,l,t: func(t)
    #~ limit = 0
    #~ foundArity = False
    #~ def wrapper(*args):
        #~ nonlocal limit,foundArity
        #~ while 1:
            #~ try:
                #~ ret = func(*args[limit:])
                #~ foundArity = True
                #~ return ret
            #~ except TypeError:
                #~ if limit == maxargs or foundArity:
                    #~ raise
                #~ limit += 1
                #~ continue
    #~ return wrapper

# this version is Python 2.x-3.x cross-compatible
# decorator to trim function calls to match the arity of the target
def _trim_arity(func, maxargs=2):
    if func in singleArgBuiltins:
        return lambda s,l,t: func(t)
    limit = [0]
    foundArity = [False]
    
    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
    if system_version[:2] >= (3,5):
        def extract_stack(limit=0):
            # special handling for Python 3.5.0 - extra deep call stack by 1
            offset = -3 if system_version == (3,5,0) else -2
            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
            return [frame_summary[:2]]
        def extract_tb(tb, limit=0):
            frames = traceback.extract_tb(tb, limit=limit)
            frame_summary = frames[-1]
            return [frame_summary[:2]]
    else:
        extract_stack = traceback.extract_stack
        extract_tb = traceback.extract_tb
    
    # synthesize what would be returned by traceback.extract_stack at the call to 
    # user's parse action 'func', so that we don't incur call penalty at parse time
    
    LINE_DIFF = 6
    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND 
    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
    this_line = extract_stack(limit=2)[-1]
    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)

    def wrapper(*args):
        while 1:
            try:
                ret = func(*args[limit[0]:])
                foundArity[0] = True
                return ret
            except TypeError:
                # re-raise TypeErrors if they did not come from our arity testing
                if foundArity[0]:
                    raise
                else:
                    try:
                        tb = sys.exc_info()[-1]
                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
                            raise
                    finally:
                        del tb

                if limit[0] <= maxargs:
                    limit[0] += 1
                    continue
                raise

    # copy func name to wrapper for sensible debug output
    func_name = "<parse action>"
    try:
        func_name = getattr(func, '__name__', 
                            getattr(func, '__class__').__name__)
    except Exception:
        func_name = str(func)
    wrapper.__name__ = func_name

    return wrapper
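
# Illustrative sketch (not part of the original source): _trim_arity lets parse
# actions be written with any of the supported signatures; the returned wrapper
# probes from the full (s, loc, toks) argument list down until the call succeeds.
#
#   def takes_toks(toks): return toks
#   def takes_all(s, loc, toks): return toks
#   wrapped1 = _trim_arity(takes_toks)
#   wrapped2 = _trim_arity(takes_all)
#   # both can now be called uniformly as wrapper(s, loc, toks) by _parseNoCache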

class ParserElement(object):
    """Abstract base level parser element class."""
    DEFAULT_WHITE_CHARS = " \n\t\r"
    verbose_stacktrace = False

    @staticmethod
    def setDefaultWhitespaceChars( chars ):
        r"""
        Overrides the default whitespace chars

        Example::
            # default whitespace chars are space, <TAB> and newline
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
            
            # change to just treat newline as significant
            ParserElement.setDefaultWhitespaceChars(" \t")
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
        """
        ParserElement.DEFAULT_WHITE_CHARS = chars

    @staticmethod
    def inlineLiteralsUsing(cls):
        """
        Set class to be used for inclusion of string literals into a parser.
        
        Example::
            # default literal class used is Literal
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']


            # change to Suppress
            ParserElement.inlineLiteralsUsing(Suppress)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
        """
        ParserElement._literalStringClass = cls

    def __init__( self, savelist=False ):
        self.parseAction = list()
        self.failAction = None
        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
        self.strRepr = None
        self.resultsName = None
        self.saveAsList = savelist
        self.skipWhitespace = True
        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        self.copyDefaultWhiteChars = True
        self.mayReturnEmpty = False # used when checking for left-recursion
        self.keepTabs = False
        self.ignoreExprs = list()
        self.debug = False
        self.streamlined = False
        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
        self.errmsg = ""
        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
        self.debugActions = ( None, None, None ) #custom debug actions
        self.re = None
        self.callPreparse = True # used to avoid redundant calls to preParse
        self.callDuringTry = False

    def copy( self ):
        """
        Make a copy of this C{ParserElement}.  Useful for defining different parse actions
        for the same parsing pattern, using copies of the original parse element.
        
        Example::
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
            
            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
        prints::
            [5120, 100, 655360, 268435456]
        Equivalent form of C{expr.copy()} is just C{expr()}::
            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
        """
        cpy = copy.copy( self )
        cpy.parseAction = self.parseAction[:]
        cpy.ignoreExprs = self.ignoreExprs[:]
        if self.copyDefaultWhiteChars:
            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        return cpy

    def setName( self, name ):
        """
        Define name for this expression, to make debugging and exception messages clearer.
        
        Example::
            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
        """
        self.name = name
        self.errmsg = "Expected " + self.name
        if hasattr(self,"exception"):
            self.exception.msg = self.errmsg
        return self

    def setResultsName( self, name, listAllMatches=False ):
        """
        Define name for referencing matching tokens as a nested attribute
        of the returned parse results.
        NOTE: this returns a *copy* of the original C{ParserElement} object;
        this is so that the client can define a basic element, such as an
        integer, and reference it in multiple places with different names.

        You can also set results names using the abbreviated syntax,
        C{expr("name")} in place of C{expr.setResultsName("name")} - 
        see L{I{__call__}<__call__>}.

        Example::
            date_str = (integer.setResultsName("year") + '/' 
                        + integer.setResultsName("month") + '/' 
                        + integer.setResultsName("day"))

            # equivalent form:
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
        """
        newself = self.copy()
        if name.endswith("*"):
            name = name[:-1]
            listAllMatches=True
        newself.resultsName = name
        newself.modalResults = not listAllMatches
        return newself

    def setBreak(self,breakFlag = True):
        """Method to invoke the Python pdb debugger when this element is
           about to be parsed. Set C{breakFlag} to True to enable, False to
           disable.
        """
        if breakFlag:
            _parseMethod = self._parse
            def breaker(instring, loc, doActions=True, callPreParse=True):
                import pdb
                pdb.set_trace()
                return _parseMethod( instring, loc, doActions, callPreParse )
            breaker._originalParseMethod = _parseMethod
            self._parse = breaker
        else:
            if hasattr(self._parse,"_originalParseMethod"):
                self._parse = self._parse._originalParseMethod
        return self
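
    # Illustrative sketch (not part of the original source), assuming the usual
    # pyparsing names Word/alphas:
    #
    #   wd = Word(alphas)
    #   wd.setBreak()              # drop into pdb just before wd is parsed
    #   wd.parseString("hello")    # pdb prompt appears, then parsing resumes
    #   wd.setBreak(False)         # restore the original parse method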

    def setParseAction( self, *fns, **kwargs ):
        """
        Define one or more actions to perform when successfully matching parse element definition.
        Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
        C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
         - s   = the original string being parsed (see note below)
         - loc = the location of the matching substring
         - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
        If the functions in fns modify the tokens, they can return them as the return
        value from fn, and the modified list of tokens will replace the original.
        Otherwise, fn does not need to return any value.

        Optional keyword arguments:
         - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing

        Note: the default parsing behavior is to expand tabs in the input string
        before starting the parsing process.  See L{I{parseString}<parseString>} for more information
        on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
        consistent view of the parsed string, the parse location, and line and column
        positions within the parsed string.
        
        Example::
            integer = Word(nums)
            date_str = integer + '/' + integer + '/' + integer

            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']

            # use parse action to convert to ints at parse time
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            date_str = integer + '/' + integer + '/' + integer

            # note that integer fields are now ints, not strings
            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
        """
        self.parseAction = list(map(_trim_arity, list(fns)))
        self.callDuringTry = kwargs.get("callDuringTry", False)
        return self

    def addParseAction( self, *fns, **kwargs ):
        """
        Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
        
        See examples in L{I{copy}<copy>}.
        """
        self.parseAction += list(map(_trim_arity, list(fns)))
        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
        return self

    def addCondition(self, *fns, **kwargs):
        """Add a boolean predicate function to expression's list of parse actions. See 
        L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, 
        functions passed to C{addCondition} need to return boolean success/fail of the condition.

        Optional keyword arguments:
         - message = define a custom message to be used in the raised exception
         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
         
        Example::
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            year_int = integer.copy()
            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
            date_str = year_int + '/' + integer + '/' + integer

            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
        """
        msg = kwargs.get("message", "failed user-defined condition")
        exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
        for fn in fns:
            def pa(s,l,t):
                if not bool(_trim_arity(fn)(s,l,t)):
                    raise exc_type(s,l,msg)
            self.parseAction.append(pa)
        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
        return self

    def setFailAction( self, fn ):
        """Define action to perform if parsing fails at this expression.
           Fail action fn is a callable function that takes the arguments
           C{fn(s,loc,expr,err)} where:
            - s = string being parsed
            - loc = location where expression match was attempted and failed
            - expr = the parse expression that failed
            - err = the exception thrown
           The function returns no value.  It may throw C{L{ParseFatalException}}
           if it is desired to stop parsing immediately."""
        self.failAction = fn
        return self
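
    # Illustrative sketch (not part of the original source): a fail action receives
    # (s, loc, expr, err) and is typically used for diagnostics; assuming Word/nums:
    #
    #   def report_failure(s, loc, expr, err):
    #       print("failed to match %s at loc %d: %s" % (expr, loc, err))
    #   integer = Word(nums).setFailAction(report_failure)
    #   integer.parseString("abc")   # prints the diagnostic, then raises ParseException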

    def _skipIgnorables( self, instring, loc ):
        exprsFound = True
        while exprsFound:
            exprsFound = False
            for e in self.ignoreExprs:
                try:
                    while 1:
                        loc,dummy = e._parse( instring, loc )
                        exprsFound = True
                except ParseException:
                    pass
        return loc

    def preParse( self, instring, loc ):
        if self.ignoreExprs:
            loc = self._skipIgnorables( instring, loc )

        if self.skipWhitespace:
            wt = self.whiteChars
            instrlen = len(instring)
            while loc < instrlen and instring[loc] in wt:
                loc += 1

        return loc

    def parseImpl( self, instring, loc, doActions=True ):
        return loc, []

    def postParse( self, instring, loc, tokenlist ):
        return tokenlist

    #~ @profile
    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
        debugging = ( self.debug ) #and doActions )

        if debugging or self.failAction:
            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
            if (self.debugActions[0] ):
                self.debugActions[0]( instring, loc, self )
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            try:
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    raise ParseException( instring, len(instring), self.errmsg, self )
            except ParseBaseException as err:
                #~ print ("Exception raised:", err)
                if self.debugActions[2]:
                    self.debugActions[2]( instring, tokensStart, self, err )
                if self.failAction:
                    self.failAction( instring, tokensStart, self, err )
                raise
        else:
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            if self.mayIndexError or preloc >= len(instring):
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    raise ParseException( instring, len(instring), self.errmsg, self )
            else:
                loc,tokens = self.parseImpl( instring, preloc, doActions )

        tokens = self.postParse( instring, loc, tokens )

        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
        if self.parseAction and (doActions or self.callDuringTry):
            if debugging:
                try:
                    for fn in self.parseAction:
                        tokens = fn( instring, tokensStart, retTokens )
                        if tokens is not None:
                            retTokens = ParseResults( tokens,
                                                      self.resultsName,
                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                      modal=self.modalResults )
                except ParseBaseException as err:
                    #~ print "Exception raised in user parse action:", err
                    if (self.debugActions[2] ):
                        self.debugActions[2]( instring, tokensStart, self, err )
                    raise
            else:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )
        if debugging:
            #~ print ("Matched",self,"->",retTokens.asList())
            if (self.debugActions[1] ):
                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )

        return loc, retTokens

    def tryParse( self, instring, loc ):
        try:
            return self._parse( instring, loc, doActions=False )[0]
        except ParseFatalException:
            raise ParseException( instring, loc, self.errmsg, self)
    
    def canParseNext(self, instring, loc):
        try:
            self.tryParse(instring, loc)
        except (ParseException, IndexError):
            return False
        else:
            return True
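
    # Illustrative sketch (not part of the original source): canParseNext() is a
    # non-raising lookahead test; assuming the usual pyparsing names Word/nums:
    #
    #   expr = Word(nums)
    #   expr.canParseNext("abc 100", 0)   # -> False (no digits at loc 0)
    #   expr.canParseNext("abc 100", 3)   # -> True  (leading whitespace is skipped, "100" matches)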

    class _UnboundedCache(object):
        def __init__(self):
            cache = {}
            self.not_in_cache = not_in_cache = object()

            def get(self, key):
                return cache.get(key, not_in_cache)

            def set(self, key, value):
                cache[key] = value

            def clear(self):
                cache.clear()
                
            def cache_len(self):
                return len(cache)

            self.get = types.MethodType(get, self)
            self.set = types.MethodType(set, self)
            self.clear = types.MethodType(clear, self)
            self.__len__ = types.MethodType(cache_len, self)

    if _OrderedDict is not None:
        class _FifoCache(object):
            def __init__(self, size):
                self.not_in_cache = not_in_cache = object()

                cache = _OrderedDict()

                def get(self, key):
                    return cache.get(key, not_in_cache)

                def set(self, key, value):
                    cache[key] = value
                    while len(cache) > size:
                        try:
                            cache.popitem(False)
                        except KeyError:
                            pass

                def clear(self):
                    cache.clear()

                def cache_len(self):
                    return len(cache)

                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)
                self.__len__ = types.MethodType(cache_len, self)

    else:
        class _FifoCache(object):
            def __init__(self, size):
                self.not_in_cache = not_in_cache = object()

                cache = {}
                key_fifo = collections.deque([], size)

                def get(self, key):
                    return cache.get(key, not_in_cache)

                def set(self, key, value):
                    cache[key] = value
                    while len(key_fifo) > size:
                        cache.pop(key_fifo.popleft(), None)
                    key_fifo.append(key)

                def clear(self):
                    cache.clear()
                    key_fifo.clear()

                def cache_len(self):
                    return len(cache)

                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)
                self.__len__ = types.MethodType(cache_len, self)
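
    # Illustrative sketch (not part of the original source): both cache classes expose
    # the same minimal protocol used by _parseCache below -- get()/set()/clear() plus a
    # sentinel object marking a cache miss:
    #
    #   cache = ParserElement._FifoCache(128)
    #   cache.set(("expr", 0), "result")
    #   cache.get(("expr", 0))                         # -> "result"
    #   cache.get(("expr", 99)) is cache.not_in_cache  # -> True (miss sentinel, not None)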

    # argument cache for optimizing repeated calls when backtracking through recursive expressions
    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
    packrat_cache_lock = RLock()
    packrat_cache_stats = [0, 0]

    # this method gets repeatedly called during backtracking with the same arguments -
    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
        HIT, MISS = 0, 1
        lookup = (self, instring, loc, callPreParse, doActions)
        with ParserElement.packrat_cache_lock:
            cache = ParserElement.packrat_cache
            value = cache.get(lookup)
            if value is cache.not_in_cache:
                ParserElement.packrat_cache_stats[MISS] += 1
                try:
                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
                except ParseBaseException as pe:
                    # cache a copy of the exception, without the traceback
                    cache.set(lookup, pe.__class__(*pe.args))
                    raise
                else:
                    cache.set(lookup, (value[0], value[1].copy()))
                    return value
            else:
                ParserElement.packrat_cache_stats[HIT] += 1
                if isinstance(value, Exception):
                    raise value
                return (value[0], value[1].copy())

    _parse = _parseNoCache

    @staticmethod
    def resetCache():
        ParserElement.packrat_cache.clear()
        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)

    _packratEnabled = False
    @staticmethod
    def enablePackrat(cache_size_limit=128):
        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
           Repeated parse attempts at the same string location (which happens
           often in many complex grammars) can immediately return a cached value,
           instead of re-executing parsing/validating code.  Memoizing is done for
           both valid results and parsing exceptions.
           
           Parameters:
            - cache_size_limit - (default=C{128}) - if an integer value is provided
              will limit the size of the packrat cache; if None is passed, then
              the cache size will be unbounded; if 0 is passed, the cache will
              be effectively disabled.
            
           This speedup may break existing programs that use parse actions that
           have side-effects.  For this reason, packrat parsing is disabled when
           you first import pyparsing.  To activate the packrat feature, your
           program must call the class method C{ParserElement.enablePackrat()}.  If
           your program uses C{psyco} to "compile as you go", you must call
           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
           Python will crash.  For best results, call C{enablePackrat()} immediately
           after importing pyparsing.
           
           Example::
               import pyparsing
               pyparsing.ParserElement.enablePackrat()
        """
        if not ParserElement._packratEnabled:
            ParserElement._packratEnabled = True
            if cache_size_limit is None:
                ParserElement.packrat_cache = ParserElement._UnboundedCache()
            else:
                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
            ParserElement._parse = ParserElement._parseCache

    def parseString( self, instring, parseAll=False ):
        """
        Execute the parse expression with the given string.
        This is the main interface to the client code, once the complete
        expression has been built.

        If you want the grammar to require that the entire input string be
        successfully parsed, then set C{parseAll} to True (equivalent to ending
        the grammar with C{L{StringEnd()}}).

        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
        in order to report proper column numbers in parse actions.
        If the input string contains tabs and
        the grammar uses parse actions that use the C{loc} argument to index into the
        string being parsed, you can ensure you have a consistent view of the input
        string by:
         - calling C{parseWithTabs} on your grammar before calling C{parseString}
           (see L{I{parseWithTabs}<parseWithTabs>})
         - define your parse action using the full C{(s,loc,toks)} signature, and
           reference the input string using the parse action's C{s} argument
         - explicitly expand the tabs in your input string before calling
           C{parseString}
        
        Example::
            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
        """
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
            #~ self.saveAsList = True
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = instring.expandtabs()
        try:
            loc, tokens = self._parse( instring, 0 )
            if parseAll:
                loc = self.preParse( instring, loc )
                se = Empty() + StringEnd()
                se._parse( instring, loc )
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
        else:
            return tokens

    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
        """
        Scan the input string for expression matches.  Each match will return the
        matching tokens, start location, and end location.  May be called with optional
        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
        C{overlap} is specified, then overlapping matches will be reported.

        Note that the start and end locations are reported relative to the string
        being parsed.  See L{I{parseString}<parseString>} for more information on parsing
        strings with embedded tabs.

        Example::
            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
            print(source)
            for tokens,start,end in Word(alphas).scanString(source):
                print(' '*start + '^'*(end-start))
                print(' '*start + tokens[0])
        
        prints::
        
            sldjf123lsdjjkf345sldkjf879lkjsfd987
            ^^^^^
            sldjf
                    ^^^^^^^
                    lsdjjkf
                              ^^^^^^
                              sldkjf
                                       ^^^^^^
                                       lkjsfd
        """
        if not self.streamlined:
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()

        if not self.keepTabs:
            instring = _ustr(instring).expandtabs()
        instrlen = len(instring)
        loc = 0
        preparseFn = self.preParse
        parseFn = self._parse
        ParserElement.resetCache()
        matches = 0
        try:
            while loc <= instrlen and matches < maxMatches:
                try:
                    preloc = preparseFn( instring, loc )
                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
                except ParseException:
                    loc = preloc+1
                else:
                    if nextLoc > loc:
                        matches += 1
                        yield tokens, preloc, nextLoc
                        if overlap:
                            nextloc = preparseFn( instring, loc )
                            if nextloc > loc:
                                loc = nextLoc
                            else:
                                loc += 1
                        else:
                            loc = nextLoc
                    else:
                        loc = preloc+1
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc

    def transformString( self, instring ):
        """
        Extension to C{L{scanString}}, to modify matching text with modified tokens that may
        be returned from a parse action.  To use C{transformString}, define a grammar and
        attach a parse action to it that modifies the returned token list.
        Invoking C{transformString()} on a target string will then scan for matches,
        and replace the matched text patterns according to the logic in the parse
        action.  C{transformString()} returns the resulting transformed string.
        
        Example::
            wd = Word(alphas)
            wd.setParseAction(lambda toks: toks[0].title())
            
            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
        Prints::
            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
        """
        out = []
        lastE = 0
        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
        # keep string locs straight between transformString and scanString
        self.keepTabs = True
        try:
            for t,s,e in self.scanString( instring ):
                out.append( instring[lastE:s] )
                if t:
                    if isinstance(t,ParseResults):
                        out += t.asList()
                    elif isinstance(t,list):
                        out += t
                    else:
                        out.append(t)
                lastE = e
            out.append(instring[lastE:])
            out = [o for o in out if o]
            return "".join(map(_ustr,_flatten(out)))
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc

    def searchString( self, instring, maxMatches=_MAX_INT ):
        """
        Another extension to C{L{scanString}}, simplifying the access to the tokens found
        to match the given parse expression.  May be called with optional
        C{maxMatches} argument, to clip searching after 'n' matches are found.
        
        Example::
            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
            cap_word = Word(alphas.upper(), alphas.lower())
            
            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))

            # the sum() builtin can be used to merge results into a single ParseResults object
            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
        prints::
            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
        """
        try:
            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc

    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
        """
        Generator method to split a string using the given expression as a separator.
        May be called with optional C{maxsplit} argument, to limit the number of splits;
        and the optional C{includeSeparators} argument (default=C{False}), to indicate
        whether the separating matching text should be included in the split results.
        
        Example::        
            punc = oneOf(list(".,;:/-!?"))
            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
        prints::
            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
        """
        splits = 0
        last = 0
        for t,s,e in self.scanString(instring, maxMatches=maxsplit):
            yield instring[last:s]
            if includeSeparators:
                yield t[0]
            last = e
        yield instring[last:]

    def __add__(self, other ):
        """
        Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
        converts them to L{Literal}s by default.
        
        Example::
            greet = Word(alphas) + "," + Word(alphas) + "!"
            hello = "Hello, World!"
            print (hello, "->", greet.parseString(hello))
        Prints::
            Hello, World! -> ['Hello', ',', 'World', '!']
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                    SyntaxWarning, stacklevel=2)
            return None
        return And( [ self, other ] )

    def __radd__(self, other ):
        """
        Implementation of + operator when left operand is not a C{L{ParserElement}}
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                    SyntaxWarning, stacklevel=2)
            return None
        return other + self

    def __sub__(self, other):
        """
        Implementation of - operator, returns C{L{And}} with error stop
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                    SyntaxWarning, stacklevel=2)
            return None
        return self + And._ErrorStop() + other

    def __rsub__(self, other ):
        """
        Implementation of - operator when left operand is not a C{L{ParserElement}}
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                    SyntaxWarning, stacklevel=2)
            return None
        return other - self

    def __mul__(self,other):
        """
        Implementation of * operator, allows use of C{expr * 3} in place of
        C{expr + expr + expr}.  Expressions may also be multiplied by a 2-integer
        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
        may also include C{None} as in:
         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
              to C{expr*n + L{ZeroOrMore}(expr)}
              (read as "at least n instances of C{expr}")
         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
              (read as "0 to n instances of C{expr}")
         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}

        Note that C{expr*(None,n)} does not raise an exception if
        more than n exprs exist in the input stream; that is,
        C{expr*(None,n)} does not enforce a maximum number of expr
        occurrences.  If this behavior is desired, then write
        C{expr*(None,n) + ~expr}
        """
        if isinstance(other,int):
            minElements, optElements = other,0
        elif isinstance(other,tuple):
            other = (other + (None, None))[:2]
            if other[0] is None:
                other = (0, other[1])
            if isinstance(other[0],int) and other[1] is None:
                if other[0] == 0:
                    return ZeroOrMore(self)
                if other[0] == 1:
                    return OneOrMore(self)
                else:
                    return self*other[0] + ZeroOrMore(self)
            elif isinstance(other[0],int) and isinstance(other[1],int):
                minElements, optElements = other
                optElements -= minElements
            else:
                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
        else:
            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))

        if minElements < 0:
            raise ValueError("cannot multiply ParserElement by negative value")
        if optElements < 0:
            raise ValueError("second tuple value must be greater or equal to first tuple value")
        if minElements == optElements == 0:
            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")

        if (optElements):
            def makeOptionalList(n):
                if n>1:
                    return Optional(self + makeOptionalList(n-1))
                else:
                    return Optional(self)
            if minElements:
                if minElements == 1:
                    ret = self + makeOptionalList(optElements)
                else:
                    ret = And([self]*minElements) + makeOptionalList(optElements)
            else:
                ret = makeOptionalList(optElements)
        else:
            if minElements == 1:
                ret = self
            else:
                ret = And([self]*minElements)
        return ret
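
    # Illustrative sketch (not part of the original source), assuming Word/nums:
    #
    #   digit = Word(nums, exact=1)
    #   (digit * 3).parseString("1 2 3")            # -> ['1', '2', '3']
    #   (digit * (2, 3)).parseString("1 2")         # -> ['1', '2']  (2 required, up to 1 more optional)
    #   (digit * (2, None)).parseString("1 2 3 4")  # -> ['1', '2', '3', '4']  (at least 2, then any number)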

    def __rmul__(self, other):
        return self.__mul__(other)

    def __or__(self, other ):
        """
        Implementation of | operator - returns C{L{MatchFirst}}
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                    SyntaxWarning, stacklevel=2)
            return None
        return MatchFirst( [ self, other ] )

    def __ror__(self, other ):
        """
        Implementation of | operator when left operand is not a C{L{ParserElement}}
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                    SyntaxWarning, stacklevel=2)
            return None
        return other | self

    def __xor__(self, other ):
        """
        Implementation of ^ operator - returns C{L{Or}}
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                    SyntaxWarning, stacklevel=2)
            return None
        return Or( [ self, other ] )

    def __rxor__(self, other ):
        """
        Implementation of ^ operator when left operand is not a C{L{ParserElement}}
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                    SyntaxWarning, stacklevel=2)
            return None
        return other ^ self

    def __and__(self, other ):
        """
        Implementation of & operator - returns C{L{Each}}
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                    SyntaxWarning, stacklevel=2)
            return None
        return Each( [ self, other ] )

    def __rand__(self, other ):
        """
        Implementation of & operator when left operand is not a C{L{ParserElement}}
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                    SyntaxWarning, stacklevel=2)
            return None
        return other & self

    def __invert__( self ):
        """
        Implementation of ~ operator - returns C{L{NotAny}}
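
        Example (illustrative)::
            # match an integer only if it is not followed by a '.'
            integer = Word(nums) + ~Literal('.')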
        """
        return NotAny( self )

    def __call__(self, name=None):
        """
        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
        
        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
        passed as C{True}.
           
        If C{name} is omitted, same as calling C{L{copy}}.

        Example::
            # these are equivalent
            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             
        """
        if name is not None:
            return self.setResultsName(name)
        else:
            return self.copy()

    def suppress( self ):
        """
        Suppresses the output of this C{ParserElement}; useful to keep punctuation from
        cluttering up returned output.
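
        Example (illustrative)::
            wd = Word(alphas)
            # the comma is matched but dropped from the results
            greeting = wd + Literal(',').suppress() + wd
            greeting.parseString("Hello, World")  # -> ['Hello', 'World']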
        """
        return Suppress( self )

    def leaveWhitespace( self ):
        """
        Disables the skipping of whitespace before matching the characters in the
        C{ParserElement}'s defined pattern.  This is normally only used internally by
        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
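
        Example (illustrative)::
            # require the number to follow the '#' with no intervening whitespace
            anchor = Literal('#') + Word(nums).leaveWhitespace()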
        """
        self.skipWhitespace = False
        return self

    def setWhitespaceChars( self, chars ):
        """
        Overrides the default whitespace chars
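
        Example (illustrative sketch)::
            # skip only spaces and tabs, so that newlines remain significant
            value = Word(alphas).setWhitespaceChars(" \\t")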
        """
        self.skipWhitespace = True
        self.whiteChars = chars
        self.copyDefaultWhiteChars = False
        return self

    def parseWithTabs( self ):
        """
        Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
        Must be called before C{parseString} when the input grammar contains elements that
        match C{<TAB>} characters.
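
        Example (illustrative sketch)::
            # keep literal <TAB> characters so that White('\\t') can match them
            kv = Word(alphas) + White('\\t').suppress() + Word(alphas)
            kv.parseWithTabs()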
        """
        self.keepTabs = True
        return self

    def ignore( self, other ):
        """
        Define expression to be ignored (e.g., comments) while doing pattern
        matching; may be called repeatedly, to define multiple comment or other
        ignorable patterns.
        
        Example::
            patt = OneOrMore(Word(alphas))
            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
            
            patt.ignore(cStyleComment)
            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
        """
        if isinstance(other, basestring):
            other = Suppress(other)

        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                self.ignoreExprs.append(other)
        else:
            self.ignoreExprs.append( Suppress( other.copy() ) )
        return self

    def setDebugActions( self, startAction, successAction, exceptionAction ):
        """
        Enable display of debugging messages while doing pattern matching.
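
        Each action is a callable taking the same arguments as the corresponding
        default debug action defined in this module; pass C{None} for any action
        to keep that default.

        Example (illustrative sketch)::
            def report_start(instring, loc, expr):
                print("Trying %s at loc %s" % (expr, loc))

            wd = Word(alphas).setDebugActions(report_start, None, None)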
        """
        self.debugActions = (startAction or _defaultStartDebugAction,
                             successAction or _defaultSuccessDebugAction,
                             exceptionAction or _defaultExceptionDebugAction)
        self.debug = True
        return self

    def setDebug( self, flag=True ):
        """
        Enable display of debugging messages while doing pattern matching.
        Set C{flag} to True to enable, False to disable.

        Example::
            wd = Word(alphas).setName("alphaword")
            integer = Word(nums).setName("numword")
            term = wd | integer
            
            # turn on debugging for wd
            wd.setDebug()

            OneOrMore(term).parseString("abc 123 xyz 890")
        
        prints::
            Match alphaword at loc 0(1,1)
            Matched alphaword -> ['abc']
            Match alphaword at loc 3(1,4)
            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
            Match alphaword at loc 7(1,8)
            Matched alphaword -> ['xyz']
            Match alphaword at loc 11(1,12)
            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
            Match alphaword at loc 15(1,16)
            Exception raised:Expected alphaword (at char 15), (line:1, col:16)

        The output shown is that produced by the default debug actions - custom debug actions can be
        specified using L{setDebugActions}. Prior to attempting
        to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
        is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
        message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
        which makes debugging and exception messages easier to understand - for instance, the default
        name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
        """
        if flag:
            self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
        else:
            self.debug = False
        return self

    def __str__( self ):
        return self.name

    def __repr__( self ):
        return _ustr(self)

    def streamline( self ):
        self.streamlined = True
        self.strRepr = None
        return self

    def checkRecursion( self, parseElementList ):
        pass

    def validate( self, validateTrace=[] ):
        """
        Check defined expressions for valid structure, check for infinite recursive definitions.
        """
        self.checkRecursion( [] )

    def parseFile( self, file_or_filename, parseAll=False ):
        """
        Execute the parse expression on the given file or filename.
        If a filename is specified (instead of a file object),
        the entire file is opened, read, and closed before parsing.
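
        Example (illustrative sketch; C{"data.txt"} is a hypothetical file name)::
            integer = Word(nums)
            # parse from a file name...
            result = integer.parseFile("data.txt")
            # ...or from an already-open file object
            with open("data.txt") as f:
                result = integer.parseFile(f)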
        """
        try:
            file_contents = file_or_filename.read()
        except AttributeError:
            with open(file_or_filename, "r") as f:
                file_contents = f.read()
        try:
            return self.parseString(file_contents, parseAll)
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc

    def __eq__(self,other):
        if isinstance(other, ParserElement):
            return self is other or vars(self) == vars(other)
        elif isinstance(other, basestring):
            return self.matches(other)
        else:
            return super(ParserElement,self)==other

    def __ne__(self,other):
        return not (self == other)

    def __hash__(self):
        return hash(id(self))

    def __req__(self,other):
        return self == other

    def __rne__(self,other):
        return not (self == other)

    def matches(self, testString, parseAll=True):
        """
        Method for quick testing of a parser against a test string. Good for simple
        inline microtests of sub-expressions while building up a larger parser.
           
        Parameters:
         - testString - to test against this expression for a match
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
            
        Example::
            expr = Word(nums)
            assert expr.matches("100")
        """
        try:
            self.parseString(_ustr(testString), parseAll=parseAll)
            return True
        except ParseBaseException:
            return False
                
    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
        """
        Execute the parse expression on a series of test strings, showing each
        test, and either the parsed results or where the parse failed. A quick
        and easy way to run a parse expression against a list of sample strings.
           
        Parameters:
         - tests - a list of separate test strings, or a multiline string of test strings
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests           
         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test 
              string; pass None to disable comment filtering
         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
              if False, only dump nested list
         - printResults - (default=C{True}) prints test output to stdout
         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing

        Returns: a (success, results) tuple, where success indicates that all tests succeeded
        (or failed if C{failureTests} is True), and the results contain a list of lines of each 
        test's output
        
        Example::
            number_expr = pyparsing_common.number.copy()

            result = number_expr.runTests('''
                # unsigned integer
                100
                # negative integer
                -100
                # float with scientific notation
                6.02e23
                # integer with scientific notation
                1e-12
                ''')
            print("Success" if result[0] else "Failed!")

            result = number_expr.runTests('''
                # stray character
                100Z
                # missing leading digit before '.'
                -.100
                # too many '.'
                3.14.159
                ''', failureTests=True)
            print("Success" if result[0] else "Failed!")
        prints::
            # unsigned integer
            100
            [100]

            # negative integer
            -100
            [-100]

            # float with scientific notation
            6.02e23
            [6.02e+23]

            # integer with scientific notation
            1e-12
            [1e-12]

            Success
            
            # stray character
            100Z
               ^
            FAIL: Expected end of text (at char 3), (line:1, col:4)

            # missing leading digit before '.'
            -.100
            ^
            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)

            # too many '.'
            3.14.159
                ^
            FAIL: Expected end of text (at char 4), (line:1, col:5)

            Success

        Each test string must be on a single line. If you want to test a string that spans multiple
        lines, create a test like this::

            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
        
        (Note that this is a raw string literal; you must include the leading 'r'.)
        """
        if isinstance(tests, basestring):
            tests = list(map(str.strip, tests.rstrip().splitlines()))
        if isinstance(comment, basestring):
            comment = Literal(comment)
        allResults = []
        comments = []
        success = True
        for t in tests:
            if comment is not None and comment.matches(t, False) or comments and not t:
                comments.append(t)
                continue
            if not t:
                continue
            out = ['\n'.join(comments), t]
            comments = []
            try:
                t = t.replace(r'\n','\n')
                result = self.parseString(t, parseAll=parseAll)
                out.append(result.dump(full=fullDump))
                success = success and not failureTests
            except ParseBaseException as pe:
                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
                if '\n' in t:
                    out.append(line(pe.loc, t))
                    out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
                else:
                    out.append(' '*pe.loc + '^' + fatal)
                out.append("FAIL: " + str(pe))
                success = success and failureTests
                result = pe
            except Exception as exc:
                out.append("FAIL-EXCEPTION: " + str(exc))
                success = success and failureTests
                result = exc

            if printResults:
                if fullDump:
                    out.append('')
                print('\n'.join(out))

            allResults.append((t, result))
        
        return success, allResults

        
class Token(ParserElement):
    """
    Abstract C{ParserElement} subclass, for defining atomic matching patterns.
    """
    def __init__( self ):
        super(Token,self).__init__( savelist=False )


class Empty(Token):
    """
    An empty token, will always match.
    """
    def __init__( self ):
        super(Empty,self).__init__()
        self.name = "Empty"
        self.mayReturnEmpty = True
        self.mayIndexError = False


class NoMatch(Token):
    """
    A token that will never match.
    """
    def __init__( self ):
        super(NoMatch,self).__init__()
        self.name = "NoMatch"
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.errmsg = "Unmatchable token"

    def parseImpl( self, instring, loc, doActions=True ):
        raise ParseException(instring, loc, self.errmsg, self)


class Literal(Token):
    """
    Token to exactly match a specified string.
    
    Example::
        Literal('blah').parseString('blah')  # -> ['blah']
        Literal('blah').parseString('blahfooblah')  # -> ['blah']
        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
    
    For case-insensitive matching, use L{CaselessLiteral}.
    
    For keyword matching (force word break before and after the matched string),
    use L{Keyword} or L{CaselessKeyword}.
    """
    def __init__( self, matchString ):
        super(Literal,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False

    # Performance tuning: this routine gets called a *lot*
    # if this is a single-character match string and the first character matches,
    # short-circuit as quickly as possible, and avoid calling startswith
    #~ @profile
    def parseImpl( self, instring, loc, doActions=True ):
        if (instring[loc] == self.firstMatchChar and
            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
            return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement._literalStringClass = Literal

class Keyword(Token):
    """
    Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
    Accepts two optional constructor arguments in addition to the keyword string:
     - C{identChars} is a string of characters that would be valid identifier characters,
          defaulting to all alphanumerics + "_" and "$"
     - C{caseless} allows case-insensitive matching, default is C{False}.
       
    Example::
        Keyword("start").parseString("start")  # -> ['start']
        Keyword("start").parseString("starting")  # -> Exception

    For case-insensitive matching, use L{CaselessKeyword}.
    """
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"

    def __init__( self, matchString, identChars=None, caseless=False ):
        super(Keyword,self).__init__()
        if identChars is None:
            identChars = Keyword.DEFAULT_KEYWORD_CHARS
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = set(identChars)

    def parseImpl( self, instring, loc, doActions=True ):
        if self.caseless:
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)

    def copy(self):
        c = super(Keyword,self).copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c

    @staticmethod
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars

class CaselessLiteral(Literal):
    """
    Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.

    Example::
        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
        
    (Contrast with example for L{CaselessKeyword}.)
    """
    def __init__( self, matchString ):
        super(CaselessLiteral,self).__init__( matchString.upper() )
        # Preserve the defining literal.
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name

    def parseImpl( self, instring, loc, doActions=True ):
        if instring[ loc:loc+self.matchLen ].upper() == self.match:
            return loc+self.matchLen, self.returnString
        raise ParseException(instring, loc, self.errmsg, self)

class CaselessKeyword(Keyword):
    """
    Caseless version of L{Keyword}.

    Example::
        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
        
    (Contrast with example for L{CaselessLiteral}.)
    """
    def __init__( self, matchString, identChars=None ):
        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )

    def parseImpl( self, instring, loc, doActions=True ):
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)

class CloseMatch(Token):
    """
    A variation on L{Literal} which matches "close" matches, that is, 
    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
     - C{match_string} - string to be matched
     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
    
    The results from a successful parse will contain the matched text from the input string and the following named results:
     - C{mismatches} - a list of the positions within the match_string where mismatches were found
     - C{original} - the original match_string used to compare against the input string
    
    If C{mismatches} is an empty list, then the match was an exact match.
    
    Example::
        patt = CloseMatch("ATCATCGAATGGA")
        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)

        # exact match
        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})

        # close match allowing up to 2 mismatches
        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
    """
    def __init__(self, match_string, maxMismatches=1):
        super(CloseMatch,self).__init__()
        self.name = match_string
        self.match_string = match_string
        self.maxMismatches = maxMismatches
        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
        self.mayIndexError = False
        self.mayReturnEmpty = False

    def parseImpl( self, instring, loc, doActions=True ):
        start = loc
        instrlen = len(instring)
        maxloc = start + len(self.match_string)

        if maxloc <= instrlen:
            match_string = self.match_string
            match_stringloc = 0
            mismatches = []
            maxMismatches = self.maxMismatches

            for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
                src,mat = s_m
                if src != mat:
                    mismatches.append(match_stringloc)
                    if len(mismatches) > maxMismatches:
                        break
            else:
                loc = start + match_stringloc + 1
                results = ParseResults([instring[start:loc]])
                results['original'] = self.match_string
                results['mismatches'] = mismatches
                return loc, results

        raise ParseException(instring, loc, self.errmsg, self)


class Word(Token):
    """
    Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction. An optional
    C{excludeChars} parameter can list characters that might be found in 
    the input C{bodyChars} string; useful to define a word of all printables
    except for one or two characters, for instance.
    
    L{srange} is useful for defining custom character set strings for defining 
    C{Word} expressions, using range notation from regular expression character sets.
    
    A common mistake is to use C{Word} to match a specific literal string, as in 
    C{Word("Address")}. Remember that C{Word} uses the string argument to define
    I{sets} of matchable characters. This expression would match "Add", "AAA",
    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
    To match an exact literal string, use L{Literal} or L{Keyword}.

    pyparsing includes helper strings for building Words:
     - L{alphas}
     - L{nums}
     - L{alphanums}
     - L{hexnums}
     - L{alphas8bit} (alphabetic characters in the Latin-1 range 128-255 - accented, tilded, umlauted, etc.)
     - L{punc8bit} (non-alphabetic characters in the Latin-1 range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
     - L{printables} (any non-whitespace character)

    Example::
        # a word composed of digits
        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
        
        # a word with a leading capital, and zero or more lowercase
        capital_word = Word(alphas.upper(), alphas.lower())

        # hostnames are alphanumeric, with leading alpha, and '-'
        hostname = Word(alphas, alphanums+'-')
        
        # roman numeral (not a strict parser, accepts invalid mix of characters)
        roman = Word("IVXLCDM")
        
        # any string of non-whitespace characters, except for ','
        csv_value = Word(printables, excludeChars=",")
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
        super(Word,self).__init__()
        if excludeChars:
            initChars = ''.join(c for c in initChars if c not in excludeChars)
            if bodyChars:
                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
        self.initCharsOrig = initChars
        self.initChars = set(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = set(bodyChars)
        else:
            self.bodyCharsOrig = initChars
            self.bodyChars = set(initChars)

        self.maxSpecified = max > 0

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            self.maxLen = exact
            self.minLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.asKeyword = asKeyword

        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.initCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except Exception:
                self.re = None

    def parseImpl( self, instring, loc, doActions=True ):
        if self.re:
            result = self.re.match(instring,loc)
            if not result:
                raise ParseException(instring, loc, self.errmsg, self)

            loc = result.end()
            return loc, result.group()

        if not(instring[ loc ] in self.initChars):
            raise ParseException(instring, loc, self.errmsg, self)

        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1

        throwException = False
        if loc - start < self.minLen:
            throwException = True
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            throwException = True
        if self.asKeyword:
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True

        if throwException:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except Exception:
            pass


        if self.strRepr is None:

            def charsAsStr(s):
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s

            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)

        return self.strRepr


class Regex(Token):
    r"""
    Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as 
    named parse results.

    Example::
        realnum = Regex(r"[+-]?\d+\.\d*")
        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
    """
    compiledREtype = type(re.compile("[A-Z]"))
    def __init__( self, pattern, flags=0):
        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()

        if isinstance(pattern, basestring):
            if not pattern:
                warnings.warn("null string passed to Regex; use Empty() instead",
                        SyntaxWarning, stacklevel=2)

            self.pattern = pattern
            self.flags = flags

            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                    SyntaxWarning, stacklevel=2)
                raise

        elif isinstance(pattern, Regex.compiledREtype):
            self.re = pattern
            self.pattern = \
            self.reString = str(pattern)
            self.flags = flags
            
        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            for k in d:
                ret[k] = d[k]
        return loc,ret

    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)

        return self.strRepr


class QuotedString(Token):
    r"""
    Token for matching strings that are delimited by quoting characters.
    
    Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=C{None})
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})

    Example::
        qs = QuotedString('"')
        print(qs.searchString('lsjdf "This is the quote" sldjf'))
        complex_qs = QuotedString('{{', endQuoteChar='}}')
        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
        sql_qs = QuotedString('"', escQuote='""')
        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
    prints::
        [['This is the quote']]
        [['This is the "quote"']]
        [['This is the quote with "embedded" quotes']]
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
        super(QuotedString,self).__init__()

        # remove whitespace from quote chars - won't work anyway
        quoteChar = quoteChar.strip()
        if not quoteChar:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()

        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if not endQuoteChar:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()

        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        self.convertWhitespaceEscapes = convertWhitespaceEscapes

        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            self.pattern += (
                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))

        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        ret = result.group()

        if self.unquoteResults:

            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]

            if isinstance(ret,basestring):
                # replace escaped whitespace
                if '\\' in ret and self.convertWhitespaceEscapes:
                    ws_map = {
                        r'\t' : '\t',
                        r'\n' : '\n',
                        r'\f' : '\f',
                        r'\r' : '\r',
                    }
                    for wslit,wschar in ws_map.items():
                        ret = ret.replace(wslit, wschar)

                # replace escaped characters
                if self.escChar:
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)

                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)

        return loc, ret

    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)

        return self.strRepr


class CharsNotIn(Token):
    """
    Token for matching words composed of characters I{not} in a given set (will
    include whitespace in matched characters if not listed in the provided exclusion set - see example).
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.

    Example::
        # define a comma-separated-value as anything that is not a ','
        csv_value = CharsNotIn(',')
        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
    prints::
        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn,self).__init__()
        self.skipWhitespace = False
        self.notChars = notChars

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            self.maxLen = exact
            self.minLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = ( self.minLen == 0 )
        self.mayIndexError = False

    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] in self.notChars:
            raise ParseException(instring, loc, self.errmsg, self)

        start = loc
        loc += 1
        notchars = self.notChars
        maxlen = min( start+self.maxLen, len(instring) )
        while loc < maxlen and \
              (instring[loc] not in notchars):
            loc += 1

        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            if len(self.notChars) > 4:
                self.strRepr = "!W:(%s...)" % self.notChars[:4]
            else:
                self.strRepr = "!W:(%s)" % self.notChars

        return self.strRepr

class White(Token):
    """
    Special matching class for matching whitespace.  Normally, whitespace is ignored
    by pyparsing grammars.  This class is included when some whitespace structures
    are significant.  Define with a string containing the whitespace characters to be
    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{L{Word}} class.
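
    Example (illustrative; whitespace is normally skipped, so C{White} is only needed
    where the grammar treats whitespace as significant)::
        # match a run of tab characters between two words
        tabbed_pair = Word(alphas) + White("\\t") + Word(alphas)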
    """
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
        #~ self.leaveWhitespace()
        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            self.maxLen = exact
            self.minLen = exact

    def parseImpl( self, instring, loc, doActions=True ):
        if not(instring[ loc ] in self.matchWhite):
            raise ParseException(instring, loc, self.errmsg, self)
        start = loc
        loc += 1
        maxloc = start + self.maxLen
        maxloc = min( maxloc, len(instring) )
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1

        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]


class _PositionToken(Token):
    def __init__( self ):
        super(_PositionToken,self).__init__()
        self.name=self.__class__.__name__
        self.mayReturnEmpty = True
        self.mayIndexError = False

class GoToColumn(_PositionToken):
    """
    Token to advance to a specific column of input text; useful for tabular report scraping.
    """
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        self.col = colno

    def preParse( self, instring, loc ):
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc

    def parseImpl( self, instring, loc, doActions=True ):
        thiscol = col( loc, instring )
        if thiscol > self.col:
            raise ParseException( instring, loc, "Text not in expected column", self )
        newloc = loc + self.col - thiscol
        ret = instring[ loc: newloc ]
        return newloc, ret


class LineStart(_PositionToken):
    """
    Matches if current position is at the beginning of a line within the parse string
    
    Example::
    
        test = '''\
        AAA this line
        AAA and this line
          AAA but not this one
        B AAA and definitely not this one
        '''

        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
            print(t)
    
    Prints::
        ['AAA', ' this line']
        ['AAA', ' and this line']    

    """
    def __init__( self ):
        super(LineStart,self).__init__()
        self.errmsg = "Expected start of line"

    def parseImpl( self, instring, loc, doActions=True ):
        if col(loc, instring) == 1:
            return loc, []
        raise ParseException(instring, loc, self.errmsg, self)

class LineEnd(_PositionToken):
    """
    Matches if current position is at the end of a line within the parse string
    """
    def __init__( self ):
        super(LineEnd,self).__init__()
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"

    def parseImpl( self, instring, loc, doActions=True ):
        if loc<len(instring):
            if instring[loc] == "\n":
                return loc+1, "\n"
            else:
                raise ParseException(instring, loc, self.errmsg, self)
        elif loc == len(instring):
            return loc+1, []
        else:
            raise ParseException(instring, loc, self.errmsg, self)

class StringStart(_PositionToken):
    """
    Matches if current position is at the beginning of the parse string
    """
    def __init__( self ):
        super(StringStart,self).__init__()
        self.errmsg = "Expected start of text"

    def parseImpl( self, instring, loc, doActions=True ):
        if loc != 0:
            # see if entire string up to here is just whitespace and ignoreables
            if loc != self.preParse( instring, 0 ):
                raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

class StringEnd(_PositionToken):
    """
    Matches if current position is at the end of the parse string
    """
    def __init__( self ):
        super(StringEnd,self).__init__()
        self.errmsg = "Expected end of text"

    def parseImpl( self, instring, loc, doActions=True ):
        if loc < len(instring):
            raise ParseException(instring, loc, self.errmsg, self)
        elif loc == len(instring):
            return loc+1, []
        elif loc > len(instring):
            return loc, []
        else:
            raise ParseException(instring, loc, self.errmsg, self)

class WordStart(_PositionToken):
    """
    Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
    the string being parsed, or at the beginning of a line.
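
    Example (illustrative)::
        # match 'cat' only when it begins a word
        expr = WordStart(alphanums) + Literal("cat")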
    """
    def __init__(self, wordChars = printables):
        super(WordStart,self).__init__()
        self.wordChars = set(wordChars)
        self.errmsg = "Not at the start of a word"

    def parseImpl(self, instring, loc, doActions=True ):
        if loc != 0:
            if (instring[loc-1] in self.wordChars or
                instring[loc] not in self.wordChars):
                raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

class WordEnd(_PositionToken):
    """
    Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
    the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordEnd,self).__init__()
        self.wordChars = set(wordChars)
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"

    def parseImpl(self, instring, loc, doActions=True ):
        instrlen = len(instring)
        if instrlen>0 and loc<instrlen:
            if (instring[loc] in self.wordChars or
                instring[loc-1] not in self.wordChars):
                raise ParseException(instring, loc, self.errmsg, self)
        return loc, []


class ParseExpression(ParserElement):
    """
    Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
    """
    def __init__( self, exprs, savelist = False ):
        super(ParseExpression,self).__init__(savelist)
        if isinstance( exprs, _generatorType ):
            exprs = list(exprs)

        if isinstance( exprs, basestring ):
            self.exprs = [ ParserElement._literalStringClass( exprs ) ]
        elif isinstance( exprs, Iterable ):
            exprs = list(exprs)
            # if sequence of strings provided, wrap with Literal
            if all(isinstance(expr, basestring) for expr in exprs):
                exprs = map(ParserElement._literalStringClass, exprs)
            self.exprs = list(exprs)
        else:
            try:
                self.exprs = list( exprs )
            except TypeError:
                self.exprs = [ exprs ]
        self.callPreparse = False

    def __getitem__( self, i ):
        return self.exprs[i]

    def append( self, other ):
        self.exprs.append( other )
        self.strRepr = None
        return self

    def leaveWhitespace( self ):
        """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
           all contained expressions."""
        self.skipWhitespace = False
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self

    def ignore( self, other ):
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self

    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr

    def streamline( self ):
        super(ParseExpression,self).streamline()

        for e in self.exprs:
            e.streamline()

        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError

            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError

        self.errmsg = "Expected " + _ustr(self)
        
        return self

    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret

    def validate( self, validateTrace=[] ):
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )
        
    def copy(self):
        ret = super(ParseExpression,self).copy()
        ret.exprs = [e.copy() for e in self.exprs]
        return ret

class And(ParseExpression):
    """
    Requires all given C{ParseExpression}s to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'+'} operator.
    May also be constructed using the C{'-'} operator, which will suppress backtracking.

    Example::
        integer = Word(nums)
        name_expr = OneOrMore(Word(alphas))

        expr = And([integer("id"),name_expr("name"),integer("age")])
        # more easily written as:
        expr = integer("id") + name_expr("name") + integer("age")
    """

    class _ErrorStop(Empty):
        def __init__(self, *args, **kwargs):
            super(And._ErrorStop,self).__init__(*args, **kwargs)
            self.name = '-'
            self.leaveWhitespace()

    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        self.setWhitespaceChars( self.exprs[0].whiteChars )
        self.skipWhitespace = self.exprs[0].skipWhitespace
        self.callPreparse = True

    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException as pe:
                    pe.__traceback__ = None
                    raise ParseSyntaxException._from_exception(pe)
                except IndexError:
                    raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            if exprtokens or exprtokens.haskeys():
                resultlist += exprtokens
        return loc, resultlist

    def __iadd__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other ) #And( [ self, other ] )

    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            if not e.mayReturnEmpty:
                break

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr


class Or(ParseExpression):
    """
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the C{'^'} operator.

    Example::
        # construct Or using '^' operator
        
        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789"))
    prints::
        [['123'], ['3.1416'], ['789']]
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        if self.exprs:
            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
        else:
            self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        maxExcLoc = -1
        maxException = None
        matches = []
        for e in self.exprs:
            try:
                loc2 = e.tryParse( instring, loc )
            except ParseException as err:
                err.__traceback__ = None
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                # save match among all matches, to retry longest to shortest
                matches.append((loc2, e))

        if matches:
            matches.sort(key=lambda x: -x[0])
            for _,e in matches:
                try:
                    return e._parse( instring, loc, doActions )
                except ParseException as err:
                    err.__traceback__ = None
                    if err.loc > maxExcLoc:
                        maxException = err
                        maxExcLoc = err.loc

        if maxException is not None:
            maxException.msg = self.errmsg
            raise maxException
        else:
            raise ParseException(instring, loc, "no defined alternatives to match", self)


    def __ixor__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other ) #Or( [ self, other ] )

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr

    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )


class MatchFirst(ParseExpression):
    """
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the C{'|'} operator.

    Example::
        # construct MatchFirst using '|' operator
        
        # watch the order of expressions to match
        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]

        # put more selective expression first
        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst,self).__init__(exprs, savelist)
        if self.exprs:
            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
        else:
            self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        maxExcLoc = -1
        maxException = None
        for e in self.exprs:
            try:
                ret = e._parse( instring, loc, doActions )
                return ret
            except ParseException as err:
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)

        else:
            # only got here if no expression matched, raise exception for match that made it the furthest
            if maxException is not None:
                maxException.msg = self.errmsg
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)

    def __ior__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other ) #MatchFirst( [ self, other ] )

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr

    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )


class Each(ParseExpression):
    """
    Requires all given C{ParseExpression}s to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'&'} operator.

    Example::
        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
        integer = Word(nums)
        shape_attr = "shape:" + shape_type("shape")
        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
        color_attr = "color:" + color("color")
        size_attr = "size:" + integer("size")

        # use Each (using operator '&') to accept attributes in any order 
        # (shape and posn are required, color and size are optional)
        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)

        shape_spec.runTests('''
            shape: SQUARE color: BLACK posn: 100, 120
            shape: CIRCLE size: 50 color: BLUE posn: 50,80
            color:GREEN size:20 shape:TRIANGLE posn:20,40
            '''
            )
    prints::
        shape: SQUARE color: BLACK posn: 100, 120
        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
        - color: BLACK
        - posn: ['100', ',', '120']
          - x: 100
          - y: 120
        - shape: SQUARE


        shape: CIRCLE size: 50 color: BLUE posn: 50,80
        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
        - color: BLUE
        - posn: ['50', ',', '80']
          - x: 50
          - y: 80
        - shape: CIRCLE
        - size: 50


        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
        - color: GREEN
        - posn: ['20', ',', '40']
          - x: 20
          - y: 40
        - shape: TRIANGLE
        - size: 20
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        self.skipWhitespace = True
        self.initExprGroups = True

    def parseImpl( self, instring, loc, doActions=True ):
        if self.initExprGroups:
            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
            self.optionals = opt1 + opt2
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt  = self.optionals[:]
        matchOrder = []

        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    matchOrder.append(self.opt1map.get(id(e),e))
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False

        if tmpReqd:
            missing = ", ".join(_ustr(e) for e in tmpReqd)
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )

        # add any unmatched Optionals, in case they have default values defined
        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]

        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)

        finalResults = sum(resultlist, ParseResults([]))
        return loc, finalResults

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr

    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )


class ParseElementEnhance(ParserElement):
    """
    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
    """
    def __init__( self, expr, savelist=False ):
        super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
            if issubclass(ParserElement._literalStringClass, Token):
                expr = ParserElement._literalStringClass(expr)
            else:
                expr = ParserElement._literalStringClass(Literal(expr))
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)

    def parseImpl( self, instring, loc, doActions=True ):
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)

    def leaveWhitespace( self ):
        self.skipWhitespace = False
        if self.expr is not None:
            # copy the contained expression before modifying it, so that shared
            # expression objects are not affected; also avoids an AttributeError
            # when no expression has been attached yet
            self.expr = self.expr.copy()
            self.expr.leaveWhitespace()
        return self

    def ignore( self, other ):
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self

    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self

    def checkRecursion( self, parseElementList ):
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )

    def validate( self, validateTrace=[] ):
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )

    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except Exception:
            pass

        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr


class FollowedBy(ParseElementEnhance):
    """
    Lookahead matching of the given parse expression.  C{FollowedBy}
    does I{not} advance the parsing position within the input string; it only
    verifies that the specified parse expression matches at the current
    position.  C{FollowedBy} always returns a null token list.

    Example::
        # use FollowedBy to match a label only if it is followed by a ':'
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        
        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
    prints::
        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
    """
    def __init__( self, expr ):
        super(FollowedBy,self).__init__(expr)
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        self.expr.tryParse( instring, loc )
        return loc, []


class NotAny(ParseElementEnhance):
    """
    Lookahead to disallow matching with the given parse expression.  C{NotAny}
    does I{not} advance the parsing position within the input string; it only
    verifies that the specified parse expression does I{not} match at the current
    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
    always returns a null token list.  May be constructed using the '~' operator.

    Example::
        
    """
    def __init__( self, expr ):
        super(NotAny,self).__init__(expr)
        #~ self.leaveWhitespace()
        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)

    def parseImpl( self, instring, loc, doActions=True ):
        if self.expr.canParseNext(instring, loc):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"

        return self.strRepr
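
# Illustrative sketch (editorial addition, never invoked by the library): a typical use of
# NotAny via the '~' operator, keeping reserved words from matching as identifiers.
# The helper name below is hypothetical.
def _example_notany():
    AND_, OR_, NOT_ = map(Keyword, "and or not".split())
    ident = ~(AND_ | OR_ | NOT_) + Word(alphas)
    return ident.parseString("width")   # -> ['width']; "and" would raise ParseException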

class _MultipleMatch(ParseElementEnhance):
    def __init__( self, expr, stopOn=None):
        super(_MultipleMatch, self).__init__(expr)
        self.saveAsList = True
        ender = stopOn
        if isinstance(ender, basestring):
            ender = ParserElement._literalStringClass(ender)
        self.not_ender = ~ender if ender is not None else None

    def parseImpl( self, instring, loc, doActions=True ):
        self_expr_parse = self.expr._parse
        self_skip_ignorables = self._skipIgnorables
        check_ender = self.not_ender is not None
        if check_ender:
            try_not_ender = self.not_ender.tryParse
        
        # must be at least one (but first see if we are the stopOn sentinel;
        # if so, fail)
        if check_ender:
            try_not_ender(instring, loc)
        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
        try:
            hasIgnoreExprs = (not not self.ignoreExprs)
            while 1:
                if check_ender:
                    try_not_ender(instring, loc)
                if hasIgnoreExprs:
                    preloc = self_skip_ignorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
                if tmptokens or tmptokens.haskeys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            pass

        return loc, tokens
        
class OneOrMore(_MultipleMatch):
    """
    Repetition of one or more of the given expression.
    
    Parameters:
     - expr - expression that must match one or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition 
          expression)          

    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: BLACK"
        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]

        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
        
        # could also be written as
        (attr_expr * (1,)).parseString(text).pprint()
    """

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + _ustr(self.expr) + "}..."

        return self.strRepr

class ZeroOrMore(_MultipleMatch):
    """
    Optional repetition of zero or more of the given expression.
    
    Parameters:
     - expr - expression that must match zero or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition 
          expression)          

    Example: similar to L{OneOrMore}
    """
    def __init__( self, expr, stopOn=None):
        super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
        self.mayReturnEmpty = True
        
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
        except (ParseException,IndexError):
            return loc, []

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]..."

        return self.strRepr
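
# Illustrative sketch (editorial addition, never invoked by the library): contrasts
# ZeroOrMore with OneOrMore - an empty repetition is acceptable.  The helper name below
# is hypothetical.
def _example_zeroormore():
    key = Word(alphas) + Suppress(':')
    values = Group(ZeroOrMore(Word(nums)))
    return (key + values).parseString("count:")   # -> ['count', []]; OneOrMore would fail here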

class _NullToken(object):
    def __bool__(self):
        return False
    __nonzero__ = __bool__
    def __str__(self):
        return ""

_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """
    Optional matching of the given expression.

    Parameters:
     - expr - expression that may be matched zero or one time
     - default (optional) - value to be returned if the optional expression is not found.

    Example::
        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
        zip.runTests('''
            # traditional ZIP code
            12345
            
            # ZIP+4 form
            12101-0001
            
            # invalid ZIP
            98765-
            ''')
    prints::
        # traditional ZIP code
        12345
        ['12345']

        # ZIP+4 form
        12101-0001
        ['12101-0001']

        # invalid ZIP
        98765-
             ^
        FAIL: Expected end of text (at char 5), (line:1, col:6)
    """
    def __init__( self, expr, default=_optionalNotMatched ):
        super(Optional,self).__init__( expr, savelist=False )
        self.saveAsList = self.expr.saveAsList
        self.defaultValue = default
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException,IndexError):
            if self.defaultValue is not _optionalNotMatched:
                if self.expr.resultsName:
                    tokens = ParseResults([ self.defaultValue ])
                    tokens[self.expr.resultsName] = self.defaultValue
                else:
                    tokens = [ self.defaultValue ]
            else:
                tokens = []
        return loc, tokens

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"

        return self.strRepr
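
# Illustrative sketch (editorial addition, never invoked by the library): the 'default'
# argument documented above is returned when the optional piece is absent.  The helper
# name below is hypothetical.
def _example_optional_default():
    port = Optional(Suppress(':') + Word(nums), default='80')
    host = Word(alphanums + '.-') + port
    return host.parseString("localhost")   # -> ['localhost', '80']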

class SkipTo(ParseElementEnhance):
    """
    Token for skipping over all undefined text until the matched expression is found.

    Parameters:
     - expr - target expression marking the end of the data to be skipped
     - include - (default=C{False}) if True, the target expression is also parsed 
          (the skipped text and target expression are returned as a 2-element list).
     - ignore - (default=C{None}) used to define grammars (typically quoted strings and 
          comments) that might contain false matches to the target expression
     - failOn - (default=C{None}) define expressions that are not allowed to be 
          included in the skipped text; if found before the target expression is found, 
          the SkipTo is not a match

    Example::
        report = '''
            Outstanding Issues Report - 1 Jan 2000

               # | Severity | Description                               |  Days Open
            -----+----------+-------------------------------------------+-----------
             101 | Critical | Intermittent system crash                 |          6
              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
              79 | Minor    | System slow when running too many reports |         47
            '''
        integer = Word(nums)
        SEP = Suppress('|')
        # use SkipTo to simply match everything up until the next SEP
        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
        # - parse action will call token.strip() for each matched token, i.e., the description body
        string_data = SkipTo(SEP, ignore=quotedString)
        string_data.setParseAction(tokenMap(str.strip))
        ticket_expr = (integer("issue_num") + SEP 
                      + string_data("sev") + SEP 
                      + string_data("desc") + SEP 
                      + integer("days_open"))
        
        for tkt in ticket_expr.searchString(report):
            print(tkt.dump())
    prints::
        ['101', 'Critical', 'Intermittent system crash', '6']
        - days_open: 6
        - desc: Intermittent system crash
        - issue_num: 101
        - sev: Critical
        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
        - days_open: 14
        - desc: Spelling error on Login ('log|n')
        - issue_num: 94
        - sev: Cosmetic
        ['79', 'Minor', 'System slow when running too many reports', '47']
        - days_open: 47
        - desc: System slow when running too many reports
        - issue_num: 79
        - sev: Minor
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        if isinstance(failOn, basestring):
            self.failOn = ParserElement._literalStringClass(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)

    def parseImpl( self, instring, loc, doActions=True ):
        startloc = loc
        instrlen = len(instring)
        expr = self.expr
        expr_parse = self.expr._parse
        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
        
        tmploc = loc
        while tmploc <= instrlen:
            if self_failOn_canParseNext is not None:
                # break if failOn expression matches
                if self_failOn_canParseNext(instring, tmploc):
                    break
                    
            if self_ignoreExpr_tryParse is not None:
                # advance past ignore expressions
                while 1:
                    try:
                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
                    except ParseBaseException:
                        break
            
            try:
                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
            except (ParseException, IndexError):
                # no match, advance loc in string
                tmploc += 1
            else:
                # matched skipto expr, done
                break

        else:
            # ran off the end of the input string without matching skipto expr, fail
            raise ParseException(instring, loc, self.errmsg, self)

        # build up return values
        loc = tmploc
        skiptext = instring[startloc:loc]
        skipresult = ParseResults(skiptext)
        
        if self.includeMatch:
            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
            skipresult += mat

        return loc, skipresult

class Forward(ParseElementEnhance):
    """
    Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.

    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
        fwdExpr << a | b | c
    will actually be evaluated as::
        (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    explicitly group the values inserted into the C{Forward}::
        fwdExpr << (a | b | c)
    Converting to use the '<<=' operator instead will avoid this problem.

    See L{ParseResults.pprint} for an example of a recursive parser created using
    C{Forward}.
    """
    def __init__( self, other=None ):
        super(Forward,self).__init__( other, savelist=False )

    def __lshift__( self, other ):
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass(other)
        self.expr = other
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        return self
        
    def __ilshift__(self, other):
        return self << other
    
    def leaveWhitespace( self ):
        self.skipWhitespace = False
        return self

    def streamline( self ):
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self

    def validate( self, validateTrace=[] ):
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        return self.__class__.__name__ + ": ..."

        # stubbed out for now - creates awful memory and perf issues
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString

    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            ret = Forward()
            ret <<= self
            return ret
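
# Illustrative sketch (editorial addition, never invoked by the library): a recursive
# grammar built with Forward and the '<<=' operator recommended in the docstring above.
# The helper name below is hypothetical.
def _example_forward_recursion():
    LBRACK, RBRACK = map(Suppress, "[]")
    item = Forward()
    nested_list = Group(LBRACK + Optional(delimitedList(item)) + RBRACK)
    item <<= Word(nums) | nested_list
    return item.parseString("[1, [2, 3], []]")   # -> [['1', ['2', '3'], []]]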

class _ForwardNoRecurse(Forward):
    def __str__( self ):
        return "..."

class TokenConverter(ParseElementEnhance):
    """
    Abstract subclass of C{ParseExpression}, for converting parsed results.
    """
    def __init__( self, expr, savelist=False ):
        super(TokenConverter,self).__init__( expr )#, savelist )
        self.saveAsList = False

class Combine(TokenConverter):
    """
    Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the input string;
    this can be disabled by specifying C{'adjacent=False'} in the constructor.

    Example::
        real = Word(nums) + '.' + Word(nums)
        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
        # will also erroneously match the following
        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']

        real = Combine(Word(nums) + '.' + Word(nums))
        print(real.parseString('3.1416')) # -> ['3.1416']
        # no match when there are internal spaces
        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True

    def ignore( self, other ):
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super( Combine, self).ignore( other )
        return self

    def postParse( self, instring, loc, tokenlist ):
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)

        if self.resultsName and retToks.haskeys():
            return [ retToks ]
        else:
            return retToks
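
# Illustrative sketch (editorial addition, never invoked by the library): the
# 'adjacent=False' and 'joinString' options mentioned in the Combine docstring above.
# The helper name below is hypothetical.
def _example_combine_adjacent():
    full_name = Combine(Word(alphas) + Word(alphas), joinString=" ", adjacent=False)
    return full_name.parseString("Jane Doe")   # -> ['Jane Doe'] as a single token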

class Group(TokenConverter):
    """
    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.

    Example::
        ident = Word(alphas)
        num = Word(nums)
        term = ident | num
        func = ident + Optional(delimitedList(term))
        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']

        func = ident + Group(Optional(delimitedList(term)))
        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
    """
    def __init__( self, expr ):
        super(Group,self).__init__( expr )
        self.saveAsList = True

    def postParse( self, instring, loc, tokenlist ):
        return [ tokenlist ]

class Dict(TokenConverter):
    """
    Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as an item key.

    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        
        # print attributes as plain groups
        print(OneOrMore(attr_expr).parseString(text).dump())
        
        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
        print(result.dump())
        
        # access named fields as dict entries, or output as dict
        print(result['shape'])        
        print(result.asDict())
    prints::
        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']

        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
    See more examples at L{ParseResults} of accessing fields by results name.
    """
    def __init__( self, expr ):
        super(Dict,self).__init__( expr )
        self.saveAsList = True

    def postParse( self, instring, loc, tokenlist ):
        for i,tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            ikey = tok[0]
            if isinstance(ikey,int):
                ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                tokenlist[ikey] = _ParseResultsWithOffset("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
            else:
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
                else:
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)

        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist


class Suppress(TokenConverter):
    """
    Converter for ignoring the results of a parsed expression.

    Example::
        source = "a, b, c,d"
        wd = Word(alphas)
        wd_list1 = wd + ZeroOrMore(',' + wd)
        print(wd_list1.parseString(source))

        # often, delimiters that are useful during parsing are just in the
        # way afterward - use Suppress to keep them out of the parsed output
        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
        print(wd_list2.parseString(source))
    prints::
        ['a', ',', 'b', ',', 'c', ',', 'd']
        ['a', 'b', 'c', 'd']
    (See also L{delimitedList}.)
    """
    def postParse( self, instring, loc, tokenlist ):
        return []

    def suppress( self ):
        return self


class OnlyOnce(object):
    """
    Wrapper for parse actions, to ensure they are only called once.
    """
    def __init__(self, methodCall):
        self.callable = _trim_arity(methodCall)
        self.called = False
    def __call__(self,s,l,t):
        if not self.called:
            results = self.callable(s,l,t)
            self.called = True
            return results
        raise ParseException(s,l,"")
    def reset(self):
        self.called = False
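
# Illustrative sketch (editorial addition, never invoked by the library): OnlyOnce lets
# a parse action fire a single time; reset() re-arms it for a later parse.  The helper
# name below is hypothetical.
def _example_onlyonce():
    def note(tokens):
        return tokens
    once = OnlyOnce(note)
    wd = Word(alphas).setParseAction(once)
    first = wd.parseString("hello")   # parse action runs here
    once.reset()                      # without this, the next parse would raise ParseException
    second = wd.parseString("world")
    return first, second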

def traceParseAction(f):
    """
    Decorator for debugging parse actions. 
    
    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})"}.
    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.

    Example::
        wd = Word(alphas)

        @traceParseAction
        def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))

        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
    prints::
        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
        <<leaving remove_duplicate_chars (ret: 'dfjkls')
        ['dfjkls']
    """
    f = _trim_arity(f)
    def z(*paArgs):
        thisFunc = f.__name__
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception as exc:
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
        return ret
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z

#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
    """
    Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace and
    comments, but this can be overridden by passing C{combine=True} in the constructor.
    If C{combine} is set to C{True}, the matching tokens are returned as a single token
    string, with the delimiters included; otherwise, the matching tokens are returned
    as a list of tokens, with the delimiters suppressed.

    Example::
        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
    """
    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
    if combine:
        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
    else:
        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)

def countedArray( expr, intExpr=None ):
    """
    Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched expr tokens are returned as a list - the leading count token is suppressed.
    
    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.

    Example::
        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']

        # in this parser, the leading integer value is given in binary,
        # '10' indicating that 2 values are in the array
        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
    """
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        n = t[0]
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        return []
    if intExpr is None:
        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
    else:
        intExpr = intExpr.copy()
    intExpr.setName("arrayLen")
    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')

def _flatten(L):
    ret = []
    for i in L:
        if isinstance(i,list):
            ret.extend(_flatten(i))
        else:
            ret.append(i)
    return ret

def matchPreviousLiteral(expr):
    """
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
    If this is not desired, use C{matchPreviousExpr}.
    Do I{not} use with packrat parsing enabled.
    """
    rep = Forward()
    def copyTokenToRepeater(s,l,t):
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And(Literal(tt) for tt in tflat)
        else:
            rep << Empty()
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep

def matchPreviousExpr(expr):
    """
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do I{not} use with packrat parsing enabled.
    """
    rep = Forward()
    e2 = expr.copy()
    rep <<= e2
    def copyTokenToRepeater(s,l,t):
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            theseTokens = _flatten(t.asList())
            if  theseTokens != matchTokens:
                raise ParseException("",0,"")
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep

def _escapeRegexRangeChars(s):
    #~  escape these chars: ^-]
    for c in r"\^-]":
        s = s.replace(c,_bslash+c)
    s = s.replace("\n",r"\n")
    s = s.replace("\t",r"\t")
    return _ustr(s)

def oneOf( strs, caseless=False, useRegex=True ):
    """
    Helper to quickly define a set of alternative Literals, making sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    while still returning a C{L{MatchFirst}} for best performance.

    Parameters:
     - strs - a string of space-delimited literals, or a collection of string literals
     - caseless - (default=C{False}) - treat all literals as caseless
     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
          if creating a C{Regex} raises an exception)

    Example::
        comp_oper = oneOf("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
    prints::
        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    """
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal

    symbols = []
    if isinstance(strs,basestring):
        symbols = strs.split()
    elif isinstance(strs, Iterable):
        symbols = list(strs)
    else:
        warnings.warn("Invalid argument to oneOf, expected string or iterable",
                SyntaxWarning, stacklevel=2)
    if not symbols:
        return NoMatch()

    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1

    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            if len(symbols)==len("".join(symbols)):
                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
            else:
                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
        except Exception:
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)


    # last resort, just use MatchFirst
    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))

def dictOf( key, value ):
    """
    Helper to easily and clearly define a dictionary by specifying the respective patterns
    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
    in the proper order.  The key pattern can include delimiting markers or punctuation,
    as long as they are suppressed, thereby leaving the significant key text.  The value
    pattern can include named results, so that the C{Dict} results can include named token
    fields.

    Example::
        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        print(OneOrMore(attr_expr).parseString(text).dump())
        
        attr_label = label
        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)

        # similar to Dict, but simpler call format
        result = dictOf(attr_label, attr_value).parseString(text)
        print(result.dump())
        print(result['shape'])
        print(result.shape)  # object attribute access works too
        print(result.asDict())
    prints::
        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        SQUARE
        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
    """
    return Dict( ZeroOrMore( Group ( key + value ) ) )

def originalTextFor(expr, asString=True):
    """
    Helper to return the original, untokenized text for a given expression.  Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns a string containing the original parsed text.
       
    If the optional C{asString} argument is passed as C{False}, then the return value is a 
    C{L{ParseResults}} containing any results names that were originally matched, and a 
    single token containing the original matched text from the input string.  So if 
    the expression passed to C{L{originalTextFor}} contains expressions with defined
    results names, you must set C{asString} to C{False} if you want to preserve those
    results name values.

    Example::
        src = "this is test <b> bold <i>text</i> </b> normal text "
        for tag in ("b","i"):
            opener,closer = makeHTMLTags(tag)
            patt = originalTextFor(opener + SkipTo(closer) + closer)
            print(patt.searchString(src)[0])
    prints::
        ['<b> bold <i>text</i> </b>']
        ['<i>text</i>']
    """
    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
    endlocMarker = locMarker.copy()
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        extractText = lambda s,l,t: s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
    matchExpr.setParseAction(extractText)
    matchExpr.ignoreExprs = expr.ignoreExprs
    return matchExpr

def ungroup(expr): 
    """
    Helper to undo pyparsing's default grouping of And expressions, even
    if all but one are non-empty.
    """
    return TokenConverter(expr).setParseAction(lambda t:t[0])
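
# Illustrative sketch (editorial addition, never invoked by the library): ungroup strips
# one level of grouping from a nested result.  The helper name below is hypothetical.
def _example_ungroup():
    grouped = Group(Word(alphas))
    return ungroup(grouped).parseString("abc")   # -> ['abc'] instead of [['abc']]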

def locatedExpr(expr):
    """
    Helper to decorate a returned token with its starting and ending locations in the input string.
    This helper adds the following results names:
     - locn_start = location where matched expression begins
     - locn_end = location where matched expression ends
     - value = the actual parsed results

    Be careful if the input text contains C{<TAB>} characters; you may want to call
    C{L{ParserElement.parseWithTabs}}.

    Example::
        wd = Word(alphas)
        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
            print(match)
    prints::
        [[0, 'ljsdf', 5]]
        [[8, 'lksdjjf', 15]]
        [[18, 'lkkjj', 23]]
    """
    locator = Empty().setParseAction(lambda s,l,t: l)
    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))


# convenience constants for positional expressions
empty       = Empty().setName("empty")
lineStart   = LineStart().setName("lineStart")
lineEnd     = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd   = StringEnd().setName("stringEnd")

_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"

def srange(s):
    r"""
    Helper to easily define string ranges for use in Word construction.  Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]")   -> "0123456789"
        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be:
     - a single character
     - an escaped character with a leading backslash (such as C{\-} or C{\]})
     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 
         (C{\0x##} is also supported for backwards compatibility) 
     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
    """
    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
    try:
        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
    except Exception:
        return ""

def matchOnlyAtCol(n):
    """
    Helper method for defining parse actions that require matching at a specific
    column in the input text.
    """
    def verifyCol(strg,locn,toks):
        if col(locn,strg) != n:
            raise ParseException(strg,locn,"matched token not at column %d" % n)
    return verifyCol
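
# Illustrative sketch (editorial addition, never invoked by the library): restrict a match
# to a specific column using the parse action returned by matchOnlyAtCol.  The helper name
# below is hypothetical.
def _example_matchOnlyAtCol():
    col1_int = Word(nums).setParseAction(matchOnlyAtCol(1))
    return col1_int.parseString("42")   # -> ['42']; "  42" would fail the column check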

def replaceWith(replStr):
    """
    Helper method for common parse actions that simply return a literal value.  Especially
    useful when used with C{L{transformString<ParserElement.transformString>}()}.

    Example::
        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
        term = na | num
        
        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
    """
    return lambda s,l,t: [replStr]

def removeQuotes(s,l,t):
    """
    Helper parse action for removing quotation marks from parsed quoted strings.

    Example::
        # by default, quotation marks are included in parsed results
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]

        # use removeQuotes to strip quotation marks from parsed results
        quotedString.setParseAction(removeQuotes)
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
    """
    return t[0][1:-1]

def tokenMap(func, *args):
    """
    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
    args are passed, they are forwarded to the given function as additional arguments after
    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
    parsed data to an integer using base 16.

    Example (compare the last example to the one in L{ParserElement.transformString})::
        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
        hex_ints.runTests('''
            00 11 22 aa FF 0a 0d 1a
            ''')
        
        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
        OneOrMore(upperword).runTests('''
            my kingdom for a horse
            ''')

        wd = Word(alphas).setParseAction(tokenMap(str.title))
        OneOrMore(wd).setParseAction(' '.join).runTests('''
            now is the winter of our discontent made glorious summer by this sun of york
            ''')
    prints::
        00 11 22 aa FF 0a 0d 1a
        [0, 17, 34, 170, 255, 10, 13, 26]

        my kingdom for a horse
        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']

        now is the winter of our discontent made glorious summer by this sun of york
        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
    """
    def pa(s,l,t):
        return [func(tokn, *args) for tokn in t]

    try:
        func_name = getattr(func, '__name__', 
                            getattr(func, '__class__').__name__)
    except Exception:
        func_name = str(func)
    pa.__name__ = func_name

    return pa

upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""

downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
    
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr,basestring):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name

    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")

    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag

def makeHTMLTags(tagStr):
    """
    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.

    Example::
        text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
        a,a_end = makeHTMLTags("A")
        link_expr = a + SkipTo(a_end)("link_text") + a_end
        
        for link in link_expr.searchString(text):
            # attributes in the <A> tag (like "href" shown here) are also accessible as named results
            print(link.link_text, '->', link.href)
    prints::
        pyparsing -> http://pyparsing.wikispaces.com
    """
    return _makeTags( tagStr, False )

def makeXMLTags(tagStr):
    """
    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
    tags only in the given case.

    Example: similar to L{makeHTMLTags}
    """
    return _makeTags( tagStr, True )
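
# Usage sketch (illustrative only, not part of the original module): makeXMLTags builds
# the same kind of start/end expressions as the makeHTMLTags example above, but the tag
# match is case-sensitive and attribute values must be double-quoted.
#   body, body_end = makeXMLTags("body")
#   body_expr = body + SkipTo(body_end)("contents") + body_end   # matches <body>, not <BODY>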

def withAttribute(*args,**attrDict):
    """
    Helper to create a validating parse action to be used with start tags created
    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
    with a required attribute value, to avoid false matches on common tags such as
    C{<TD>} or C{<DIV>}.

    Call C{withAttribute} with a series of attribute names and values. Specify the list
    of filter attribute names and values as:
     - keyword arguments, as in C{(align="right")}, or
     - an explicit dict with the C{**} operator, when an attribute name is also a Python
          reserved word, as in C{**{"class":"Customer", "align":"right"}}, or
     - a list of name-value tuples, as in C{( ("ns1:class", "Customer"), ("ns2:align","right") )}
    For attribute names with a namespace prefix, you must use the second or third form
    (keyword argument names cannot contain C{':'}).  Attribute
    names are matched insensitive to upper/lower case.
       
    If just testing for C{class} (with or without a namespace), use C{L{withClass}}.

    To verify that the attribute exists, but without specifying a value, pass
    C{withAttribute.ANY_VALUE} as the value.

    Example::
        html = '''
            <div>
            Some text
            <div type="grid">1 4 0 1 0</div>
            <div type="graph">1,3 2,3 1,1</div>
            <div>this has no type</div>
            </div>
                
        '''
        div,div_end = makeHTMLTags("div")

        # only match div tag having a type attribute with value "grid"
        div_grid = div().setParseAction(withAttribute(type="grid"))
        grid_expr = div_grid + SkipTo(div | div_end)("body")
        for grid_header in grid_expr.searchString(html):
            print(grid_header.body)
        
        # construct a match with any div tag having a type attribute, regardless of the value
        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
        div_expr = div_any_type + SkipTo(div | div_end)("body")
        for div_header in div_expr.searchString(html):
            print(div_header.body)
    prints::
        1 4 0 1 0

        1 4 0 1 0
        1,3 2,3 1,1
    """
    if args:
        attrs = args[:]
    else:
        attrs = attrDict.items()
    attrs = [(k,v) for k,v in attrs]
    def pa(s,l,tokens):
        for attrName,attrValue in attrs:
            if attrName not in tokens:
                raise ParseException(s,l,"no matching attribute " + attrName)
            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
                                            (attrName, tokens[attrName], attrValue))
    return pa
withAttribute.ANY_VALUE = object()
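
# Usage sketch (hypothetical tag and attribute names, not part of the original module):
# for attribute names with a namespace prefix, pass name-value tuples positionally
# instead of keyword arguments:
#   item, item_end = makeXMLTags("item")
#   customer_item = item.copy().setParseAction(withAttribute(("ns1:class", "Customer")))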

def withClass(classname, namespace=''):
    """
    Simplified version of C{L{withAttribute}} when matching on a div class - made
    difficult because C{class} is a reserved word in Python.

    Example::
        html = '''
            <div>
            Some text
            <div class="grid">1 4 0 1 0</div>
            <div class="graph">1,3 2,3 1,1</div>
            <div>this &lt;div&gt; has no class</div>
            </div>
                
        '''
        div,div_end = makeHTMLTags("div")
        div_grid = div().setParseAction(withClass("grid"))
        
        grid_expr = div_grid + SkipTo(div | div_end)("body")
        for grid_header in grid_expr.searchString(html):
            print(grid_header.body)
        
        div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
        div_expr = div_any_type + SkipTo(div | div_end)("body")
        for div_header in div_expr.searchString(html):
            print(div_header.body)
    prints::
        1 4 0 1 0

        1 4 0 1 0
        1,3 2,3 1,1
    """
    classattr = "%s:class" % namespace if namespace else "class"
    return withAttribute(**{classattr : classname})        

opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()

def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
    """
    Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy.  Operators may be unary or
    binary, left- or right-associative.  Parse actions can also be attached
    to operator expressions. The generated parser will also recognize the use 
    of parentheses to override operator precedences (see example below).
    
    Note: if you define a deep operator list, you may see performance issues
    when using infixNotation. See L{ParserElement.enablePackrat} for a
    mechanism to potentially improve your parser performance.

    Parameters:
     - baseExpr - expression representing the most basic element of the nested expression grammar
     - opList - list of tuples, one for each operator precedence level in the
      expression grammar; each tuple is of the form
      (opExpr, numTerms, rightLeftAssoc, parseAction), where:
       - opExpr is the pyparsing expression for the operator;
          may also be a string, which will be converted to a Literal;
          if numTerms is 3, opExpr is a tuple of two expressions, for the
          two operators separating the 3 terms
       - numTerms is the number of terms for this operator (must
          be 1, 2, or 3)
       - rightLeftAssoc indicates whether the operator is
          right- or left-associative, using the pyparsing-defined
          constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
       - parseAction is the parse action to be associated with
          expressions matching this operator expression (the
          parse action tuple member may be omitted); if the parse action
          is passed a tuple or list of functions, this is equivalent to
          calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
     - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
     - rpar - expression for matching right-parentheses (default=C{Suppress(')')})

    Example::
        # simple example of four-function arithmetic with ints and variable names
        integer = pyparsing_common.signed_integer
        varname = pyparsing_common.identifier 
        
        arith_expr = infixNotation(integer | varname,
            [
            ('-', 1, opAssoc.RIGHT),
            (oneOf('* /'), 2, opAssoc.LEFT),
            (oneOf('+ -'), 2, opAssoc.LEFT),
            ])
        
        arith_expr.runTests('''
            5+3*6
            (5+3)*6
            -2--11
            ''', fullDump=False)
    prints::
        5+3*6
        [[5, '+', [3, '*', 6]]]

        (5+3)*6
        [[[5, '+', 3], '*', 6]]

        -2--11
        [[['-', 2], '-', ['-', 11]]]
    """
    ret = Forward()
    lastExpr = baseExpr | ( lpar + ret + rpar )
    for i,operDef in enumerate(opList):
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward().setName(termName)
        if rightLeftAssoc == opAssoc.LEFT:
            if arity == 1:
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            if isinstance(pa, (tuple, list)):
                matchExpr.setParseAction(*pa)
            else:
                matchExpr.setParseAction(pa)
        thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
        lastExpr = thisExpr
    ret <<= lastExpr
    return ret
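
# Sketch (hypothetical helper, not part of the original module): the optional fourth
# tuple member attaches a parse action to every match at that precedence level, e.g.
# to evaluate left-associative binary operators while parsing:
#   def _eval_mul_div(t):
#       t = t[0]                      # the Group built for this precedence level
#       val = t[0]
#       for op, rhs in zip(t[1::2], t[2::2]):
#           val = val * rhs if op == '*' else val / rhs
#       return val
#   term = infixNotation(pyparsing_common.integer,
#                        [(oneOf('* /'), 2, opAssoc.LEFT, _eval_mul_div)])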

operatorPrecedence = infixNotation
"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""

dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
                       Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")

def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """
    Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).

    Parameters:
     - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
     - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
     - content - expression for items within the nested lists (default=C{None})
     - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})

    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.

    Use the C{ignoreExpr} argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression.  Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
    The default is L{quotedString}, but if no expressions are to be ignored,
    then pass C{None} for this argument.

    Example::
        data_type = oneOf("void int short long char float double")
        decl_data_type = Combine(data_type + Optional(Word('*')))
        ident = Word(alphas+'_', alphanums+'_')
        number = pyparsing_common.number
        arg = Group(decl_data_type + ident)
        LPAR,RPAR = map(Suppress, "()")

        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))

        c_function = (decl_data_type("type") 
                      + ident("name")
                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR 
                      + code_body("body"))
        c_function.ignore(cStyleComment)
        
        source_code = '''
            int is_odd(int x) { 
                return (x%2); 
            }
                
            int dec_to_hex(char hchar) { 
                if (hchar >= '0' && hchar <= '9') { 
                    return (ord(hchar)-ord('0')); 
                } else { 
                    return (10+ord(hchar)-ord('A'));
                } 
            }
        '''
        for func in c_function.searchString(source_code):
            print("%(name)s (%(type)s) args: %(args)s" % func)

    prints::
        is_odd (int) args: [['int', 'x']]
        dec_to_hex (int) args: [['char', 'hchar']]
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr + 
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    ret = Forward()
    if ignoreExpr is not None:
        ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content )  + Suppress(closer) )
    ret.setName('nested %s%s expression' % (opener,closer))
    return ret
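
# Quick sketch (illustrative only): with all defaults, nestedExpr() parses
# whitespace-delimited, parenthesized (Lisp-style) lists:
#   nestedExpr().parseString("(a (b c) d)").asList()   # -> [['a', ['b', 'c'], 'd']]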

def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """
    Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.

    Parameters:
     - blockStatementExpr - expression defining syntax of statement that
            is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
            (multiple statementWithIndentedBlock expressions within a single grammar
            should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond the
            current level; set to False for a block of left-most statements
            (default=C{True})

    A valid block must contain at least one C{blockStatement}.

    Example::
        data = '''
        def A(z):
          A1
          B = 100
          G = A2
          A2
          A3
        B
        def BB(a,b,c):
          BB1
          def BBA():
            bba1
            bba2
            bba3
        C
        D
        def spam(x,y):
             def eggs(z):
                 pass
        '''


        indentStack = [1]
        stmt = Forward()

        identifier = Word(alphas, alphanums)
        funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
        func_body = indentedBlock(stmt, indentStack)
        funcDef = Group( funcDecl + func_body )

        rvalue = Forward()
        funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
        rvalue << (funcCall | identifier | Word(nums))
        assignment = Group(identifier + "=" + rvalue)
        stmt << ( funcDef | assignment | identifier )

        module_body = OneOrMore(stmt)

        parseTree = module_body.parseString(data)
        parseTree.pprint()
    prints::
        [['def',
          'A',
          ['(', 'z', ')'],
          ':',
          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
         'B',
         ['def',
          'BB',
          ['(', 'a', 'b', 'c', ')'],
          ':',
          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
         'C',
         'D',
         ['def',
          'spam',
          ['(', 'x', 'y', ')'],
          ':',
          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] 
    """
    def checkPeerIndent(s,l,t):
        if l >= len(s): return
        curCol = col(l,s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")

    def checkSubIndent(s,l,t):
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")

    def checkUnindent(s,l,t):
        if l >= len(s): return
        curCol = col(l,s)
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()

    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
    PEER   = Empty().setParseAction(checkPeerIndent).setName('')
    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
    if indent:
        smExpr = Group( Optional(NL) +
            #~ FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr.setName('indented block')

alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
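
# Sketch (illustrative only): alphas8bit can be appended to Word character sets to
# accept Latin-1 accented letters, e.g.
#   name = Word(alphas + alphas8bit)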

anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
    """Helper parser action to replace common HTML entities with their special characters"""
    return _htmlEntityMap.get(t.entity)
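
# Usage sketch (illustrative only): pair this parse action with transformString to decode
# entities; a copy keeps the shared commonHTMLEntity expression unmodified.
#   decoder = commonHTMLEntity.copy().setParseAction(replaceHTMLEntity)
#   decoder.transformString("2 &gt; 1 &amp;&amp; 3 &lt; 4")   # -> '2 > 1 && 3 < 4'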

# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form C{/* ... */}"

htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form C{<!-- ... -->}"

restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form C{// ... (to end of line)}"

cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"

javaStyleComment = cppStyleComment
"Same as C{L{cppStyleComment}}"

pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form C{# ... (to end of line)}"

_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
                                  Optional( Word(" \t") +
                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
   This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""

# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
    """
    Here are some common low-level expressions that may be useful in jump-starting parser development:
     - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
     - common L{programming identifiers<identifier>}
     - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
     - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
     - L{UUID<uuid>}
     - L{comma-separated list<comma_separated_list>}
    Parse actions:
     - C{L{convertToInteger}}
     - C{L{convertToFloat}}
     - C{L{convertToDate}}
     - C{L{convertToDatetime}}
     - C{L{stripHTMLTags}}
     - C{L{upcaseTokens}}
     - C{L{downcaseTokens}}

    Example::
        pyparsing_common.number.runTests('''
            # any int or real number, returned as the appropriate type
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.fnumber.runTests('''
            # any int or real number, returned as float
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.hex_integer.runTests('''
            # hex numbers
            100
            FF
            ''')

        pyparsing_common.fraction.runTests('''
            # fractions
            1/2
            -3/4
            ''')

        pyparsing_common.mixed_integer.runTests('''
            # mixed fractions
            1
            1/2
            -3/4
            1-3/4
            ''')

        import uuid
        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
        pyparsing_common.uuid.runTests('''
            # uuid
            12345678-1234-5678-1234-567812345678
            ''')
    prints::
        # any int or real number, returned as the appropriate type
        100
        [100]

        -100
        [-100]

        +100
        [100]

        3.14159
        [3.14159]

        6.02e23
        [6.02e+23]

        1e-12
        [1e-12]

        # any int or real number, returned as float
        100
        [100.0]

        -100
        [-100.0]

        +100
        [100.0]

        3.14159
        [3.14159]

        6.02e23
        [6.02e+23]

        1e-12
        [1e-12]

        # hex numbers
        100
        [256]

        FF
        [255]

        # fractions
        1/2
        [0.5]

        -3/4
        [-0.75]

        # mixed fractions
        1
        [1]

        1/2
        [0.5]

        -3/4
        [-0.75]

        1-3/4
        [1.75]

        # uuid
        12345678-1234-5678-1234-567812345678
        [UUID('12345678-1234-5678-1234-567812345678')]
    """

    convertToInteger = tokenMap(int)
    """
    Parse action for converting parsed integers to Python int
    """

    convertToFloat = tokenMap(float)
    """
    Parse action for converting parsed numbers to Python float
    """

    integer = Word(nums).setName("integer").setParseAction(convertToInteger)
    """expression that parses an unsigned integer, returns an int"""

    hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
    """expression that parses a hexadecimal integer, returns an int"""

    signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
    """expression that parses an integer with optional leading sign, returns an int"""

    fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
    """fractional expression of an integer divided by an integer, returns a float"""
    fraction.addParseAction(lambda t: t[0]/t[-1])

    mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
    mixed_integer.addParseAction(sum)

    real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
    """expression that parses a floating point number and returns a float"""

    sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
    """expression that parses a floating point number with optional scientific notation and returns a float"""

    # streamlining this expression makes the docs nicer-looking
    number = (sci_real | real | signed_integer).streamline()
    """any numeric expression, returns the corresponding Python type"""

    fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
    """any int or real number, returned as float"""
    
    identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
    
    ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
    "IPv4 address (C{0.0.0.0 - 255.255.255.255})"

    _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
    _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
    _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
    _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
    _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
    ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
    "IPv6 address (long, short, or mixed form)"
    
    mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
    "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"

    @staticmethod
    def convertToDate(fmt="%Y-%m-%d"):
        """
        Helper to create a parse action for converting parsed date string to Python datetime.date

        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})

        Example::
            date_expr = pyparsing_common.iso8601_date.copy()
            date_expr.setParseAction(pyparsing_common.convertToDate())
            print(date_expr.parseString("1999-12-31"))
        prints::
            [datetime.date(1999, 12, 31)]
        """
        def cvt_fn(s,l,t):
            try:
                return datetime.strptime(t[0], fmt).date()
            except ValueError as ve:
                raise ParseException(s, l, str(ve))
        return cvt_fn

    @staticmethod
    def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
        """
        Helper to create a parse action for converting parsed datetime string to Python datetime.datetime

        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})

        Example::
            dt_expr = pyparsing_common.iso8601_datetime.copy()
            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
            print(dt_expr.parseString("1999-12-31T23:59:59.999"))
        prints::
            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
        """
        def cvt_fn(s,l,t):
            try:
                return datetime.strptime(t[0], fmt)
            except ValueError as ve:
                raise ParseException(s, l, str(ve))
        return cvt_fn

    iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
    "ISO8601 date (C{yyyy-mm-dd})"

    iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
    "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"

    uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
    "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"

    _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
    @staticmethod
    def stripHTMLTags(s, l, tokens):
        """
        Parse action to remove HTML tags from web page HTML source

        Example::
            # strip HTML links from normal text 
            text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
            td,td_end = makeHTMLTags("TD")
            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
            
            print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
        """
        return pyparsing_common._html_stripper.transformString(tokens[0])

    _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') 
                                        + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
    comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""

    upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
    """Parse action to convert tokens to upper case."""

    downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
    """Parse action to convert tokens to lower case."""


if __name__ == "__main__":

    selectToken    = CaselessLiteral("select")
    fromToken      = CaselessLiteral("from")

    ident          = Word(alphas, alphanums + "_$")

    columnName     = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
    columnNameList = Group(delimitedList(columnName)).setName("columns")
    columnSpec     = ('*' | columnNameList)

    tableName      = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
    tableNameList  = Group(delimitedList(tableName)).setName("tables")
    
    simpleSQL      = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")

    # demo runTests method, including embedded comments in test string
    simpleSQL.runTests("""
        # '*' as column list and dotted table name
        select * from SYS.XYZZY

        # caseless match on "SELECT", and casts back to "select"
        SELECT * from XYZZY, ABC

        # list of column names, and mixed case SELECT keyword
        Select AA,BB,CC from Sys.dual

        # multiple tables
        Select A, B, C from Sys.dual, Table2

        # invalid SELECT keyword - should fail
        Xelect A, B, C from Sys.dual

        # incomplete command - should fail
        Select

        # invalid column name - should fail
        Select ^^^ frox Sys.dual

        """)

    pyparsing_common.number.runTests("""
        100
        -100
        +100
        3.14159
        6.02e23
        1e-12
        """)

    # any int or real number, returned as float
    pyparsing_common.fnumber.runTests("""
        100
        -100
        +100
        3.14159
        6.02e23
        1e-12
        """)

    pyparsing_common.hex_integer.runTests("""
        100
        FF
        """)

    import uuid
    pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
    pyparsing_common.uuid.runTests("""
        12345678-1234-5678-1234-567812345678
        """)