Archive: /home/mmdealscpanel/yummmdeals.com/pgen2.tar

==> __init__.py <==
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""The pgen2 package."""
==> parse.py <==
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Parser engine for the grammar tables generated by pgen.

The grammar table must be loaded first.

See Parser/parser.c in the Python distribution for additional info on
how this parsing engine works.

"""

# Local imports
from . import token

class ParseError(Exception):
    """Exception to signal the parser is stuck."""

    def __init__(self, msg, type, value, context):
        Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
                           (msg, type, value, context))
        self.msg = msg
        self.type = type
        self.value = value
        self.context = context

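    # __reduce__ is defined so ParseError instances pickle cleanly; the
    # default exception reduction would lose the extra constructor
    # arguments.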
    def __reduce__(self):
        return type(self), (self.msg, self.type, self.value, self.context)

class Parser(object):
    """Parser engine.

    The proper usage sequence is:

    p = Parser(grammar, [converter])  # create instance
    p.setup([start])                  # prepare for parsing
    <for each input token>:
        if p.addtoken(...):           # parse a token; may raise ParseError
            break
    root = p.rootnode                 # root of abstract syntax tree

    A Parser instance may be reused by calling setup() repeatedly.

    A Parser instance contains state pertaining to the current token
    sequence, and should not be used concurrently by different threads
    to parse separate token sequences.

    See driver.py for how to get input tokens by tokenizing a file or
    string.

    Parsing is complete when addtoken() returns True; the root of the
    abstract syntax tree can then be retrieved from the rootnode
    instance variable.  When a syntax error occurs, addtoken() raises
    the ParseError exception.  There is no error recovery; the parser
    cannot be used after a syntax error was reported (but it can be
    reinitialized by calling setup()).

    """

    def __init__(self, grammar, convert=None):
        """Constructor.

        The grammar argument is a grammar.Grammar instance; see the
        grammar module for more information.

        The parser is not ready yet for parsing; you must call the
        setup() method to get it started.

        The optional convert argument is a function mapping concrete
        syntax tree nodes to abstract syntax tree nodes.  If not
        given, no conversion is done and the syntax tree produced is
        the concrete syntax tree.  If given, it must be a function of
        two arguments, the first being the grammar (a grammar.Grammar
        instance), and the second being the concrete syntax tree node
        to be converted.  The syntax tree is converted from the bottom
        up.

        A concrete syntax tree node is a (type, value, context, nodes)
        tuple, where type is the node type (a token or symbol number),
        value is None for symbols and a string for tokens, context is
        None or an opaque value used for error reporting (typically a
        (lineno, offset) pair), and nodes is a list of children for
        symbols, and None for tokens.

        An abstract syntax tree node may be anything; this is entirely
        up to the converter function.

        """
        self.grammar = grammar
        self.convert = convert or (lambda grammar, node: node)

    def setup(self, start=None):
        """Prepare for parsing.

        This *must* be called before starting to parse.

        The optional argument is an alternative start symbol; it
        defaults to the grammar's start symbol.

        You can use a Parser instance to parse any number of programs;
        each time you call setup() the parser is reset to an initial
        state determined by the (implicit or explicit) start symbol.

        """
        if start is None:
            start = self.grammar.start
        # Each stack entry is a tuple: (dfa, state, node).
        # A node is a tuple: (type, value, context, children),
        # where children is a list of nodes or None, and context may be None.
        newnode = (start, None, None, [])
        stackentry = (self.grammar.dfas[start], 0, newnode)
        self.stack = [stackentry]
        self.rootnode = None
        self.used_names = set() # Aliased to self.rootnode.used_names in pop()
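        # The stack only becomes empty once the start symbol has been fully
        # popped, at which point addtoken() returns True.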

    def addtoken(self, type, value, context):
        """Add a token; return True iff this is the end of the program."""
        # Map from token to label
        ilabel = self.classify(type, value, context)
        # Loop until the token is shifted; may raise exceptions
        while True:
            dfa, state, node = self.stack[-1]
            states, first = dfa
            arcs = states[state]
            # Look for a state with this label
            for i, newstate in arcs:
                t, v = self.grammar.labels[i]
                if ilabel == i:
                    # Look it up in the list of labels
                    assert t < 256
                    # Shift a token; we're done with it
                    self.shift(type, value, newstate, context)
                    # Pop while we are in an accept-only state
                    state = newstate
                    while states[state] == [(0, state)]:
                        self.pop()
                        if not self.stack:
                            # Done parsing!
                            return True
                        dfa, state, node = self.stack[-1]
                        states, first = dfa
                    # Done with this token
                    return False
                elif t >= 256:
                    # See if it's a symbol and if we're in its first set
                    itsdfa = self.grammar.dfas[t]
                    itsstates, itsfirst = itsdfa
                    if ilabel in itsfirst:
                        # Push a symbol
                        self.push(t, self.grammar.dfas[t], newstate, context)
                        break # To continue the outer while loop
            else:
                if (0, state) in arcs:
                    # An accepting state, pop it and try something else
                    self.pop()
                    if not self.stack:
                        # Done parsing, but another token is input
                        raise ParseError("too much input",
                                         type, value, context)
                else:
                    # No success finding a transition
                    raise ParseError("bad input", type, value, context)

    def classify(self, type, value, context):
        """Turn a token into a label.  (Internal)"""
        if type == token.NAME:
            # Keep a listing of all used names
            self.used_names.add(value)
            # Check for reserved words
            ilabel = self.grammar.keywords.get(value)
            if ilabel is not None:
                return ilabel
        ilabel = self.grammar.tokens.get(type)
        if ilabel is None:
            raise ParseError("bad token", type, value, context)
        return ilabel

    def shift(self, type, value, newstate, context):
        """Shift a token.  (Internal)"""
        dfa, state, node = self.stack[-1]
        newnode = (type, value, context, None)
        newnode = self.convert(self.grammar, newnode)
        if newnode is not None:
            node[-1].append(newnode)
        self.stack[-1] = (dfa, newstate, node)

    def push(self, type, newdfa, newstate, context):
        """Push a nonterminal.  (Internal)"""
        dfa, state, node = self.stack[-1]
        newnode = (type, None, context, [])
        self.stack[-1] = (dfa, newstate, node)
        self.stack.append((newdfa, 0, newnode))

    def pop(self):
        """Pop a nonterminal.  (Internal)"""
        popdfa, popstate, popnode = self.stack.pop()
        newnode = self.convert(self.grammar, popnode)
        if newnode is not None:
            if self.stack:
                dfa, state, node = self.stack[-1]
                node[-1].append(newnode)
            else:
                self.rootnode = newnode
                self.rootnode.used_names = self.used_names
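

if __name__ == "__main__":
    # Minimal smoke test -- a sketch assuming the full lib2to3 package is
    # importable; the driver tokenizes the source and feeds each token to
    # Parser.addtoken() until it returns True.
    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver
    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    print(d.parse_string("x = 1\n"))
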
==> conv.py <==
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Convert graminit.[ch] spit out by pgen to Python code.

Pgen is the Python parser generator.  It is useful to quickly create a
parser from a grammar file in Python's grammar notation.  But I don't
want my parsers to be written in C (yet), so I'm translating the
parsing tables to Python data structures and writing a Python parse
engine.

Note that the token numbers are constants determined by the standard
Python tokenizer.  The standard token module defines these numbers and
their names (the names are not used much).  The token numbers are
hardcoded into the Python tokenizer and into pgen.  A Python
implementation of the Python tokenizer is also available, in the
standard tokenize module.

On the other hand, symbol numbers (representing the grammar's
non-terminals) are assigned by pgen based on the actual grammar
input.

Note: this module is pretty much obsolete; the pgen module generates
equivalent grammar tables directly from the Grammar.txt input file
without having to invoke the Python pgen C program.

"""

# Python imports
import re

# Local imports
from pgen2 import grammar, token


class Converter(grammar.Grammar):
    """Grammar subclass that reads classic pgen output files.

    The run() method reads the tables as produced by the pgen parser
    generator, typically contained in two C files, graminit.h and
    graminit.c.  The other methods are for internal use only.

    See the base class for more documentation.

    """

    def run(self, graminit_h, graminit_c):
        """Load the grammar tables from the text files written by pgen."""
        if not self.parse_graminit_h(graminit_h):
            return False
        if not self.parse_graminit_c(graminit_c):
            return False
        self.finish_off()
        return True

    def parse_graminit_h(self, filename):
        """Parse the .h file written by pgen.  (Internal)

        This file is a sequence of #define statements defining the
        nonterminals of the grammar as numbers.  We build two tables
        mapping the numbers to names and back.

        """
        try:
            f = open(filename)
        except OSError as err:
            print("Can't open %s: %s" % (filename, err))
            return False
        self.symbol2number = {}
        self.number2symbol = {}
        lineno = 0
        for line in f:
            lineno += 1
            mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
            if not mo:
                # Blank lines are harmless; anything else is a parse error.
                if line.strip():
                    print("%s(%s): can't parse %s" % (filename, lineno,
                                                      line.strip()))
            else:
                symbol, number = mo.groups()
                number = int(number)
                assert symbol not in self.symbol2number
                assert number not in self.number2symbol
                self.symbol2number[symbol] = number
                self.number2symbol[number] = symbol
        f.close()
        return True

    def parse_graminit_c(self, filename):
        """Parse the .c file written by pgen.  (Internal)

        The file looks as follows.  The first two lines are always this:

        #include "pgenheaders.h"
        #include "grammar.h"

        After that come four blocks:

        1) one or more state definitions
        2) a table defining dfas
        3) a table defining labels
        4) a struct defining the grammar

        A state definition has the following form:
        - one or more arc arrays, each of the form:
          static arc arcs_<n>_<m>[<k>] = {
                  {<i>, <j>},
                  ...
          };
        - followed by a state array, of the form:
          static state states_<s>[<t>] = {
                  {<k>, arcs_<n>_<m>},
                  ...
          };

        """
        try:
            f = open(filename)
        except OSError as err:
            print("Can't open %s: %s" % (filename, err))
            return False
        # The code below essentially uses f's iterator-ness!
        lineno = 0

        # Expect the two #include lines
        lineno, line = lineno+1, next(f)
        assert line == '#include "pgenheaders.h"\n', (lineno, line)
        lineno, line = lineno+1, next(f)
        assert line == '#include "grammar.h"\n', (lineno, line)

        # Parse the state definitions
        lineno, line = lineno+1, next(f)
        allarcs = {}
        states = []
        while line.startswith("static arc "):
            while line.startswith("static arc "):
                mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
                              line)
                assert mo, (lineno, line)
                n, m, k = list(map(int, mo.groups()))
                arcs = []
                for _ in range(k):
                    lineno, line = lineno+1, next(f)
                    mo = re.match(r"\s+{(\d+), (\d+)},$", line)
                    assert mo, (lineno, line)
                    i, j = list(map(int, mo.groups()))
                    arcs.append((i, j))
                lineno, line = lineno+1, next(f)
                assert line == "};\n", (lineno, line)
                allarcs[(n, m)] = arcs
                lineno, line = lineno+1, next(f)
            mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
            assert mo, (lineno, line)
            s, t = list(map(int, mo.groups()))
            assert s == len(states), (lineno, line)
            state = []
            for _ in range(t):
                lineno, line = lineno+1, next(f)
                mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
                assert mo, (lineno, line)
                k, n, m = list(map(int, mo.groups()))
                arcs = allarcs[n, m]
                assert k == len(arcs), (lineno, line)
                state.append(arcs)
            states.append(state)
            lineno, line = lineno+1, next(f)
            assert line == "};\n", (lineno, line)
            lineno, line = lineno+1, next(f)
        self.states = states

        # Parse the dfas
        dfas = {}
        mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
        assert mo, (lineno, line)
        ndfas = int(mo.group(1))
        for i in range(ndfas):
            lineno, line = lineno+1, next(f)
            mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
                          line)
            assert mo, (lineno, line)
            symbol = mo.group(2)
            number, x, y, z = list(map(int, mo.group(1, 3, 4, 5)))
            assert self.symbol2number[symbol] == number, (lineno, line)
            assert self.number2symbol[number] == symbol, (lineno, line)
            assert x == 0, (lineno, line)
            state = states[z]
            assert y == len(state), (lineno, line)
            lineno, line = lineno+1, next(f)
            mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
            assert mo, (lineno, line)
            first = {}
            rawbitset = eval(mo.group(1))
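            # The first-set string is a little-endian bitmap: bit j of
            # byte b set means label number b*8 + j can begin this symbol.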
            for b, c in enumerate(rawbitset):
                byte = ord(c)
                for j in range(8):
                    if byte & (1 << j):
                        first[b*8 + j] = 1
            dfas[number] = (state, first)
        lineno, line = lineno+1, next(f)
        assert line == "};\n", (lineno, line)
        self.dfas = dfas

        # Parse the labels
        labels = []
        lineno, line = lineno+1, next(f)
        mo = re.match(r"static label labels\[(\d+)\] = {$", line)
        assert mo, (lineno, line)
        nlabels = int(mo.group(1))
        for i in range(nlabels):
            lineno, line = lineno+1, next(f)
            mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
            assert mo, (lineno, line)
            x, y = mo.groups()
            x = int(x)
            if y == "0":
                y = None
            else:
                y = eval(y)
            labels.append((x, y))
        lineno, line = lineno+1, next(f)
        assert line == "};\n", (lineno, line)
        self.labels = labels

        # Parse the grammar struct
        lineno, line = lineno+1, next(f)
        assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
        lineno, line = lineno+1, next(f)
        mo = re.match(r"\s+(\d+),$", line)
        assert mo, (lineno, line)
        ndfas = int(mo.group(1))
        assert ndfas == len(self.dfas)
        lineno, line = lineno+1, next(f)
        assert line == "\tdfas,\n", (lineno, line)
        lineno, line = lineno+1, next(f)
        mo = re.match(r"\s+{(\d+), labels},$", line)
        assert mo, (lineno, line)
        nlabels = int(mo.group(1))
        assert nlabels == len(self.labels), (lineno, line)
        lineno, line = lineno+1, next(f)
        mo = re.match(r"\s+(\d+)$", line)
        assert mo, (lineno, line)
        start = int(mo.group(1))
        assert start in self.number2symbol, (lineno, line)
        self.start = start
        lineno, line = lineno+1, next(f)
        assert line == "};\n", (lineno, line)
        try:
            lineno, line = lineno+1, next(f)
        except StopIteration:
            pass
        else:
            assert 0, (lineno, line)
        f.close()
        return True

    def finish_off(self):
        """Create additional useful structures.  (Internal)."""
        self.keywords = {} # map from keyword strings to arc labels
        self.tokens = {}   # map from numeric token values to arc labels
        for ilabel, (type, value) in enumerate(self.labels):
            if type == token.NAME and value is not None:
                self.keywords[value] = ilabel
            elif value is None:
                self.tokens[type] = ilabel
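

if __name__ == "__main__":
    # Hypothetical smoke test: convert classic pgen output from the current
    # directory and report the table sizes (the file names here are
    # assumptions, not a fixed interface).
    c = Converter()
    if c.run("graminit.h", "graminit.c"):
        print("%d dfas, %d labels, start symbol %d"
              % (len(c.dfas), len(c.labels), c.start))
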
__pycache__/token.cpython-36.opt-1.pyc000064400000003474150467362370013506 0ustar003


 \�@sPdZdZdZdZdZdZdZdZdZd	Z	d
Z
dZdZd
Z
dZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZd Z d!Z!d"Z"d#Z#d$Z$d%Z%d&Z&d'Z'd(Z(d)Z)d*Z*d+Z+d,Z,d-Z-d.Z.d/Z/d0Z0d1Z1d2Z2d3Z3d4Z4d5Z5d6Z6d7Z7d8Z8d9Z9d:Z:d;Z;d<Z<d=Z=iZ>x6e?e@�jA��D]$\ZBZCeDeC�eDd�k�reBe>eC<�qWd>d?�ZEd@dA�ZFdBdC�ZGdDS)Ez!Token constants (from "token.h").����������	�
���
������������������� �!�"�#�$�%�&�'�(�)�*�+�,�-�.�/�0�1�2�3�4�5�6�7�8�9�:�;�cCs|tkS)N)�	NT_OFFSET)�x�r@�+/usr/lib64/python3.6/lib2to3/pgen2/token.py�
ISTERMINALNsrBcCs|tkS)N)r>)r?r@r@rA�
ISNONTERMINALQsrCcCs|tkS)N)�	ENDMARKER)r?r@r@rA�ISEOFTsrEN)H�__doc__rD�NAME�NUMBER�STRING�NEWLINE�INDENT�DEDENT�LPAR�RPAR�LSQB�RSQB�COLON�COMMA�SEMI�PLUS�MINUS�STAR�SLASH�VBAR�AMPER�LESS�GREATER�EQUAL�DOT�PERCENTZ	BACKQUOTE�LBRACE�RBRACE�EQEQUAL�NOTEQUAL�	LESSEQUAL�GREATEREQUAL�TILDE�
CIRCUMFLEX�	LEFTSHIFT�
RIGHTSHIFT�
DOUBLESTAR�	PLUSEQUAL�MINEQUAL�	STAREQUAL�
SLASHEQUAL�PERCENTEQUAL�
AMPEREQUAL�	VBAREQUAL�CIRCUMFLEXEQUAL�LEFTSHIFTEQUAL�RIGHTSHIFTEQUAL�DOUBLESTAREQUAL�DOUBLESLASH�DOUBLESLASHEQUAL�AT�ATEQUAL�OP�COMMENT�NL�RARROW�AWAIT�ASYNC�
ERRORTOKEN�N_TOKENSr>�tok_name�list�globals�items�_nameZ_value�typerBrCrEr@r@r@rA�<module>s�__pycache__/pgen.cpython-36.pyc000064400000023071150467362370012353 0ustar003


 \�5�@sdddlmZmZmZGdd�dej�ZGdd�de�ZGdd�de�ZGdd	�d	e�Z	ddd�Z
d
S)�)�grammar�token�tokenizec@seZdZdS)�PgenGrammarN)�__name__�
__module__�__qualname__�r	r	�*/usr/lib64/python3.6/lib2to3/pgen2/pgen.pyrsrc@s�eZdZd&dd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Zdd�Z	dd�Z
dd�Zdd�Zdd�Z
dd�Zdd�Zdd�Zdd�Zd'd d!�Zd"d#�Zd$d%�ZdS)(�ParserGeneratorNcCsld}|dkrt|�}|j}||_||_tj|j�|_|j�|j	�\|_
|_|dk	rZ|�i|_|j
�dS)N)�open�close�filename�streamr�generate_tokens�readline�	generator�gettoken�parse�dfas�startsymbol�first�addfirstsets)�selfrrZclose_streamr	r	r
�__init__szParserGenerator.__init__cCs*t�}t|jj��}|j�|j|j�|jd|j�x.|D]&}dt|j	�}||j	|<||j
|<q<Wx�|D]�}|j|}g}xl|D]d}g}x6t|jj
��D]$\}	}
|j|j||	�|j|
�f�q�W|jr�|jd|j|�f�|j|�q�W|jj|�||j||�f|j|j	|<qlW|j	|j|_|S)N��)r�listr�keys�sort�remover�insert�len�
symbol2numberZ
number2symbol�sorted�arcs�items�append�
make_label�index�isfinal�states�
make_first�start)r�c�names�name�i�dfar+�stater%�label�nextr	r	r
�make_grammars.




  zParserGenerator.make_grammarcCs8|j|}i}x$t|�D]}|j||�}d||<qW|S)Nr)rr$r()rr.r0Zrawfirstrr4�ilabelr	r	r
r,4s
zParserGenerator.make_firstcCsbt|j�}|dj�r�||jkrZ||jkr4|j|S|jj|j|df�||j|<|Snbtt|d�}t|t	�sxt
|��|tjks�t
|��||jkr�|j|S|jj|df�||j|<|Sn�|ddks�t
|��t
|�}|dj��r ||jk�r�|j|S|jjtj|f�||j|<|Sn>tj|}||jk�r@|j|S|jj|df�||j|<|SdS)Nr�"�')r8r9)r"Zlabels�isalphar#Zsymbol2labelr'�getattrr�
isinstance�int�AssertionError�tok_name�tokens�eval�keywords�NAMErZopmap)rr.r4r7Zitoken�valuer	r	r
r(=s<












zParserGenerator.make_labelcCs<t|jj��}|j�x |D]}||jkr|j|�qWdS)N)rrrrr�	calcfirst)rr/r0r	r	r
rks


zParserGenerator.addfirstsetsc	Cs
|j|}d|j|<|d}i}i}x�|jj�D]x\}}||jkr�||jkrl|j|}|dkr�td|��n|j|�|j|}|j|�|||<q0d||<|di||<q0Wi}	xJ|j�D]>\}}
x4|
D],}||	kr�td||||	|f��||	|<q�Wq�W||j|<dS)Nrzrecursion for rule %rrzArule %s is ambiguous; %s is in the first sets of %s as well as %s)rrr%r&�
ValueErrorrE�update)rr0r2r3ZtotalsetZoverlapcheckr4r5�fsetZinverseZitsfirstZsymbolr	r	r
rEss2









zParserGenerator.calcfirstc	Cs�i}d}x�|jtjkr�x|jtjkr.|j�qW|jtj�}|jtjd�|j�\}}|jtj�|j	||�}t
|�}|j|�t
|�}|||<|dkr
|}q
W||fS)N�:)�typer�	ENDMARKER�NEWLINEr�expectrC�OP�	parse_rhs�make_dfar"�simplify_dfa)	rrrr0�a�zr2ZoldlenZnewlenr	r	r
r�s"
zParserGenerator.parsecs�t|t�st�t|t�st��fdd�}�fdd��t||�|�g}x�|D]�}i}x<|jD]2}x,|jD]"\}}	|dk	rf�|	|j|i��qfWqZWxRt|j��D]B\}}
x,|D]}|j|
kr�Pq�Wt|
|�}|j	|�|j
||�q�WqJW|S)Ncsi}�||�|S)Nr	)r3�base)�
addclosurer	r
�closure�s
z)ParserGenerator.make_dfa.<locals>.closurecsLt|t�st�||krdSd||<x$|jD]\}}|dkr*�||�q*WdS)Nr)r<�NFAStater>r%)r3rTr4r5)rUr	r
rU�sz,ParserGenerator.make_dfa.<locals>.addclosure)r<rWr>�DFAState�nfasetr%�
setdefaultr$r&r'�addarc)rr-�finishrVr+r3r%Znfastater4r5rY�str	)rUr
rP�s&




zParserGenerator.make_dfac
Cs�td|�|g}x�t|�D]�\}}td|||kr4dp6d�x^|jD]T\}}||kr^|j|�}	nt|�}	|j|�|dkr�td|	�qBtd||	f�qBWqWdS)NzDump of NFA forz  Statez(final)�z	    -> %dz    %s -> %d)�print�	enumerater%r)r"r')
rr0r-r\Ztodor1r3r4r5�jr	r	r
�dump_nfa�s

zParserGenerator.dump_nfacCsltd|�x\t|�D]P\}}td||jr,dp.d�x0t|jj��D]\}}td||j|�f�qBWqWdS)NzDump of DFA forz  Statez(final)r^z    %s -> %d)r_r`r*r$r%r&r))rr0r2r1r3r4r5r	r	r
�dump_dfa�s

zParserGenerator.dump_dfacCs~d}xt|rxd}xft|�D]Z\}}xPt|dt|��D]:}||}||kr4||=x|D]}|j||�qTWd}Pq4WqWqWdS)NTFr)r`�ranger"�
unifystate)rr2Zchangesr1Zstate_iraZstate_jr3r	r	r
rQ�s
zParserGenerator.simplify_dfacCs�|j�\}}|jdkr||fSt�}t�}|j|�|j|�x6|jdkrt|j�|j�\}}|j|�|j|�q@W||fSdS)N�|)�	parse_altrDrWr[r)rrRrSZaaZzzr	r	r
rO�s



zParserGenerator.parse_rhscCsP|j�\}}x:|jdks*|jtjtjfkrF|j�\}}|j|�|}qW||fS)N�(�[)rhri)�
parse_itemrDrJrrC�STRINGr[)rrR�br.�dr	r	r
rg
s
zParserGenerator.parse_altcCs�|jdkr>|j�|j�\}}|jtjd�|j|�||fS|j�\}}|j}|dkr`||fS|j�|j|�|dkr�||fS||fSdS)Nri�]�+�*)rorp)rDrrOrMrrNr[�
parse_atom)rrRrSrDr	r	r
rjs


zParserGenerator.parse_itemcCs�|jdkr4|j�|j�\}}|jtjd�||fS|jtjtjfkrpt	�}t	�}|j
||j�|j�||fS|jd|j|j�dS)Nrh�)z+expected (...) or NAME or STRING, got %s/%s)rDrrOrMrrNrJrCrkrWr[�raise_error)rrRrSr	r	r
rq(s
zParserGenerator.parse_atomcCsD|j|ks|dk	r2|j|kr2|jd|||j|j�|j}|j�|S)Nzexpected %s/%s, got %s/%s)rJrDrsr)rrJrDr	r	r
rM9szParserGenerator.expectcCsJt|j�}x"|dtjtjfkr,t|j�}qW|\|_|_|_|_|_	dS)Nr)
r5rr�COMMENT�NLrJrDZbegin�end�line)r�tupr	r	r
rAs
zParserGenerator.gettokencGs^|r8y||}Wn&dj|gttt|���}YnXt||j|jd|jd|jf��dS)N� rr)�joinr�map�str�SyntaxErrorrrvrw)r�msg�argsr	r	r
rsHs zParserGenerator.raise_error)N)N)rrrrr6r,r(rrErrPrbrcrQrOrgrjrqrMrrsr	r	r	r
r
s$
	.$

rc@seZdZdd�Zddd�ZdS)rWcCs
g|_dS)N)r%)rr	r	r
rSszNFAState.__init__NcCs8|dkst|t�st�t|t�s$t�|jj||f�dS)N)r<r|r>rWr%r')rr5r4r	r	r
r[VszNFAState.addarc)N)rrrrr[r	r	r	r
rWQsrWc@s0eZdZdd�Zdd�Zdd�Zdd�Zd	Zd	S)
rXcCsLt|t�st�ttt|��t�s$t�t|t�s2t�||_||k|_i|_dS)N)	r<�dictr>r5�iterrWrYr*r%)rrY�finalr	r	r
r]s
zDFAState.__init__cCs8t|t�st�||jkst�t|t�s*t�||j|<dS)N)r<r|r>r%rX)rr5r4r	r	r
r[eszDFAState.addarccCs.x(|jj�D]\}}||kr||j|<qWdS)N)r%r&)r�old�newr4r5r	r	r
rekszDFAState.unifystatecCsft|t�st�|j|jkrdSt|j�t|j�kr6dSx*|jj�D]\}}||jj|�k	rBdSqBWdS)NFT)r<rXr>r*r"r%r&�get)r�otherr4r5r	r	r
�__eq__pszDFAState.__eq__N)rrrrr[rer��__hash__r	r	r	r
rX[s
rX�Grammar.txtcCst|�}|j�S)N)rr6)r�pr	r	r
�generate_grammar�sr�N)r�)r^rrrZGrammarr�objectrrWrXr�r	r	r	r
�<module>sI
%__pycache__/pgen.cpython-36.opt-1.pyc000064400000022165150467362370013315 0ustar003


 \�5�@sdddlmZmZmZGdd�dej�ZGdd�de�ZGdd�de�ZGdd	�d	e�Z	ddd�Z
d
S)�)�grammar�token�tokenizec@seZdZdS)�PgenGrammarN)�__name__�
__module__�__qualname__�r	r	�*/usr/lib64/python3.6/lib2to3/pgen2/pgen.pyrsrc@s�eZdZd&dd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Zdd�Z	dd�Z
dd�Zdd�Zdd�Z
dd�Zdd�Zdd�Zdd�Zd'd d!�Zd"d#�Zd$d%�ZdS)(�ParserGeneratorNcCsld}|dkrt|�}|j}||_||_tj|j�|_|j�|j	�\|_
|_|dk	rZ|�i|_|j
�dS)N)�open�close�filename�streamr�generate_tokens�readline�	generator�gettoken�parse�dfas�startsymbol�first�addfirstsets)�selfrrZclose_streamr	r	r
�__init__szParserGenerator.__init__cCs*t�}t|jj��}|j�|j|j�|jd|j�x.|D]&}dt|j	�}||j	|<||j
|<q<Wx�|D]�}|j|}g}xl|D]d}g}x6t|jj
��D]$\}	}
|j|j||	�|j|
�f�q�W|jr�|jd|j|�f�|j|�q�W|jj|�||j||�f|j|j	|<qlW|j	|j|_|S)N��)r�listr�keys�sort�remover�insert�len�
symbol2numberZ
number2symbol�sorted�arcs�items�append�
make_label�index�isfinal�states�
make_first�start)r�c�names�name�i�dfar+�stater%�label�nextr	r	r
�make_grammars.




  zParserGenerator.make_grammarcCs8|j|}i}x$t|�D]}|j||�}d||<qW|S)Nr)rr$r()rr.r0Zrawfirstrr4�ilabelr	r	r
r,4s
zParserGenerator.make_firstcCs&t|j�}|dj�r�||jkrZ||jkr4|j|S|jj|j|df�||j|<|Sn>tt|d�}||jkrz|j|S|jj|df�||j|<|Sn�t	|�}|dj�r�||j
kr�|j
|S|jjtj|f�||j
|<|Sn>tj
|}||jk�r|j|S|jj|df�||j|<|SdS)Nr)r"Zlabels�isalphar#Zsymbol2labelr'�getattrr�tokens�eval�keywords�NAMErZopmap)rr.r4r7Zitoken�valuer	r	r
r(=s6













zParserGenerator.make_labelcCs<t|jj��}|j�x |D]}||jkr|j|�qWdS)N)rrrrr�	calcfirst)rr/r0r	r	r
rks


zParserGenerator.addfirstsetsc	Cs
|j|}d|j|<|d}i}i}x�|jj�D]x\}}||jkr�||jkrl|j|}|dkr�td|��n|j|�|j|}|j|�|||<q0d||<|di||<q0Wi}	xJ|j�D]>\}}
x4|
D],}||	kr�td||||	|f��||	|<q�Wq�W||j|<dS)Nrzrecursion for rule %rrzArule %s is ambiguous; %s is in the first sets of %s as well as %s)rrr%r&�
ValueErrorr?�update)rr0r2r3ZtotalsetZoverlapcheckr4r5�fsetZinverseZitsfirstZsymbolr	r	r
r?ss2









zParserGenerator.calcfirstc	Cs�i}d}x�|jtjkr�x|jtjkr.|j�qW|jtj�}|jtjd�|j�\}}|jtj�|j	||�}t
|�}|j|�t
|�}|||<|dkr
|}q
W||fS)N�:)�typer�	ENDMARKER�NEWLINEr�expectr=�OP�	parse_rhs�make_dfar"�simplify_dfa)	rrrr0�a�zr2ZoldlenZnewlenr	r	r
r�s"
zParserGenerator.parsecs��fdd�}�fdd��t||�|�g}x�|D]�}i}x<|jD]2}x,|jD]"\}}	|dk	rJ�|	|j|i��qJWq>WxRt|j��D]B\}}
x,|D]}|j|
kr�Pq�Wt|
|�}|j|�|j||�q�Wq.W|S)Ncsi}�||�|S)Nr	)r3�base)�
addclosurer	r
�closure�s
z)ParserGenerator.make_dfa.<locals>.closurecs>||krdSd||<x$|jD]\}}|dkr�||�qWdS)Nr)r%)r3rNr4r5)rOr	r
rO�sz,ParserGenerator.make_dfa.<locals>.addclosure)�DFAState�nfasetr%�
setdefaultr$r&r'�addarc)rr-�finishrPr+r3r%Znfastater4r5rR�str	)rOr
rJ�s"




zParserGenerator.make_dfac
Cs�td|�|g}x�t|�D]�\}}td|||kr4dp6d�x^|jD]T\}}||kr^|j|�}	nt|�}	|j|�|dkr�td|	�qBtd||	f�qBWqWdS)NzDump of NFA forz  Statez(final)�z	    -> %dz    %s -> %d)�print�	enumerater%r)r"r')
rr0r-rUZtodor1r3r4r5�jr	r	r
�dump_nfa�s

zParserGenerator.dump_nfacCsltd|�x\t|�D]P\}}td||jr,dp.d�x0t|jj��D]\}}td||j|�f�qBWqWdS)NzDump of DFA forz  Statez(final)rWz    %s -> %d)rXrYr*r$r%r&r))rr0r2r1r3r4r5r	r	r
�dump_dfa�s

zParserGenerator.dump_dfacCs~d}xt|rxd}xft|�D]Z\}}xPt|dt|��D]:}||}||kr4||=x|D]}|j||�qTWd}Pq4WqWqWdS)NTFr)rY�ranger"�
unifystate)rr2Zchangesr1Zstate_irZZstate_jr3r	r	r
rK�s
zParserGenerator.simplify_dfacCs�|j�\}}|jdkr||fSt�}t�}|j|�|j|�x6|jdkrt|j�|j�\}}|j|�|j|�q@W||fSdS)N�|)�	parse_altr>�NFAStaterTr)rrLrMZaaZzzr	r	r
rI�s



zParserGenerator.parse_rhscCsP|j�\}}x:|jdks*|jtjtjfkrF|j�\}}|j|�|}qW||fS)N�(�[)rbrc)�
parse_itemr>rDrr=�STRINGrT)rrL�br.�dr	r	r
r`
s
zParserGenerator.parse_altcCs�|jdkr>|j�|j�\}}|jtjd�|j|�||fS|j�\}}|j}|dkr`||fS|j�|j|�|dkr�||fS||fSdS)Nrc�]�+�*)rirj)r>rrIrGrrHrT�
parse_atom)rrLrMr>r	r	r
rds


zParserGenerator.parse_itemcCs�|jdkr4|j�|j�\}}|jtjd�||fS|jtjtjfkrpt	�}t	�}|j
||j�|j�||fS|jd|j|j�dS)Nrb�)z+expected (...) or NAME or STRING, got %s/%s)r>rrIrGrrHrDr=rerarT�raise_error)rrLrMr	r	r
rk(s
zParserGenerator.parse_atomcCsD|j|ks|dk	r2|j|kr2|jd|||j|j�|j}|j�|S)Nzexpected %s/%s, got %s/%s)rDr>rmr)rrDr>r	r	r
rG9szParserGenerator.expectcCsJt|j�}x"|dtjtjfkr,t|j�}qW|\|_|_|_|_|_	dS)Nr)
r5rr�COMMENT�NLrDr>Zbegin�end�line)r�tupr	r	r
rAs
zParserGenerator.gettokencGs^|r8y||}Wn&dj|gttt|���}YnXt||j|jd|jd|jf��dS)N� rr)�joinr�map�str�SyntaxErrorrrprq)r�msg�argsr	r	r
rmHs zParserGenerator.raise_error)N)N)rrrrr6r,r(rr?rrJr[r\rKrIr`rdrkrGrrmr	r	r	r
r
s$
	.$

rc@seZdZdd�Zddd�ZdS)racCs
g|_dS)N)r%)rr	r	r
rSszNFAState.__init__NcCs|jj||f�dS)N)r%r')rr5r4r	r	r
rTVszNFAState.addarc)N)rrrrrTr	r	r	r
raQsrac@s0eZdZdd�Zdd�Zdd�Zdd�Zd	Zd	S)
rQcCs||_||k|_i|_dS)N)rRr*r%)rrR�finalr	r	r
r]s
zDFAState.__init__cCs||j|<dS)N)r%)rr5r4r	r	r
rTeszDFAState.addarccCs.x(|jj�D]\}}||kr||j|<qWdS)N)r%r&)r�old�newr4r5r	r	r
r^kszDFAState.unifystatecCsX|j|jkrdSt|j�t|j�kr(dSx*|jj�D]\}}||jj|�k	r4dSq4WdS)NFT)r*r"r%r&�get)r�otherr4r5r	r	r
�__eq__pszDFAState.__eq__N)rrrrrTr^r�__hash__r	r	r	r
rQ[s
rQ�Grammar.txtcCst|�}|j�S)N)rr6)r�pr	r	r
�generate_grammar�sr�N)r�)rWrrrZGrammarr�objectrrarQr�r	r	r	r
�<module>sI
%__pycache__/literals.cpython-36.pyc000064400000002776150467362370013252 0ustar003


 \O�@sPdZddlZddddddd	d
ddd
�
Zdd�Zdd�Zdd�ZedkrLe�dS)z<Safely evaluate Python string literals without using eval().�N����
�
�	��'�"�\)
�a�b�f�n�r�t�vr	r
rcCs�|jdd�\}}|jd�st�tj|�}|dk	r4|S|jd�r�|dd�}t|�dkrbtd|��yt|d�}Wq�tk
r�td|��Yq�Xn0yt|d�}Wn tk
r�td	|��YnXt|�S)
Nr�r�x�z!invalid hex string escape ('\%s')��z#invalid octal string escape ('\%s'))	�group�
startswith�AssertionError�simple_escapes�get�len�
ValueError�int�chr)�m�all�tail�escZhexes�i�r&�./usr/lib64/python3.6/lib2to3/pgen2/literals.py�escapes$

r(cCs�|jd�s(|jd�s(tt|dd����|d}|dd�|dkrL|d}|j|�sptt|t|�d����t|�dt|�ks�t�|t|�t|��}tjdt|�S)Nr	r
rr�rz)\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3}))rr�repr�endswithr�re�subr()�s�qr&r&r'�
evalString(s($r0cCsDx>td�D]2}t|�}t|�}t|�}||kr
t||||�q
WdS)N�)�ranger r*r0�print)r%�cr.�er&r&r'�test2sr6�__main__)�__doc__r,rr(r0r6�__name__r&r&r&r'�<module>s 
	__pycache__/__init__.cpython-36.opt-2.pyc000064400000000203150467362370014111 0ustar003


 \��@sdS)N�rrr�./usr/lib64/python3.6/lib2to3/pgen2/__init__.py�<module>s__pycache__/tokenize.cpython-36.pyc000064400000036032150467362370013253 0ustar003


 \NX�=@s�dZdZdZddlZddlZddlmZmZddlTddl	m
Z
d	d
�ee
�D�ddd
gZ[
ye
Wnek
r~eZ
YnXdd�Zdd�Zdd�ZdZdZeede�ee�ZdZdZdZdZedd�Zeeeee�ZdZedd�ee�Zd eZeee�Zed!ed"�Z ee ee�Z!d#Z"d$Z#d%Z$d&Z%d'Z&ee&d(e&d)�Z'ee&d*e&d+�Z(ed,d-d.d/d0d1d2d3d4�	Z)d5Z*ed6d7�Z+ee)e*e+�Z,ee!e,e(e�Z-ee-Z.ee&d8ed9d�e&d:ed;d��Z/edee'�Z0eee0e!e,e/e�Z1e2e3ej4e.e1e$e%f��\Z5Z6Z7Z8ej4e"�ej4e#�e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8ddddddddd<�4Z9iZ:xd�D]Z;e;e:e;<�q�WiZ<xd�D]Z;e;e<e;<�q�Wd�Z=Gd�d��d�e>�Z?Gd�d��d�e>�Z@d�d��ZAeAfd�d�ZBd�d��ZCGd�d��d��ZDej4d�ejE�ZFej4d�ejE�ZGd�d��ZHd�d��ZId�d
�ZJd�d�ZKeLd�k�r�ddlMZMeNeMjO�dk�r|eBePeMjOd�jQ�neBeMjRjQ�dS)�a�Tokenization help for Python programs.

generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens.  It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF).  It generates
5-tuples with these members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators

Older entry points
    tokenize_loop(readline, tokeneater)
    tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found.zKa-Ping Yee <ping@lfw.org>z@GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro�N)�BOM_UTF8�lookup)�*�)�tokencCsg|]}|ddkr|�qS)r�_�)�.0�xrr�./usr/lib64/python3.6/lib2to3/pgen2/tokenize.py�
<listcomp>%sr�tokenize�generate_tokens�
untokenizecGsddj|�dS)N�(�|�))�join)�choicesrrr�group0srcGst|�dS)Nr)r)rrrr�any1srcGst|�dS)N�?)r)rrrr�maybe2srz[ \f\t]*z	#[^\r\n]*z\\\r?\nz[a-zA-Z_]\w*z0[bB]_?[01]+(?:_[01]+)*z(0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?z0[oO]?_?[0-7]+(?:_[0-7]+)*[lL]?z[1-9]\d*(?:_\d+)*[lL]?z0[lL]?z[eE][-+]?\d+(?:_\d+)*z\d+(?:_\d+)*\.(?:\d+(?:_\d+)*)?z\.\d+(?:_\d+)*z\d+(?:_\d+)*z\d+(?:_\d+)*[jJ]z[jJ]z[^'\\]*(?:\\.[^'\\]*)*'z[^"\\]*(?:\\.[^"\\]*)*"z%[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''z%[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""z#(?:[uUrRbBfF]|[rR][bB]|[bBuU][rR])?�'''�"""z'[^\n'\\]*(?:\\.[^\n'\\]*)*'z"[^\n"\\]*(?:\\.[^\n"\\]*)*"z\*\*=?z>>=?z<<=?z<>z!=z//=?z->z[+\-*/%&@|^=<>]=?�~z[][(){}]z\r?\nz[:;.,`@]z'[^\n'\\]*(?:\\.[^\n'\\]*)*�'z"[^\n"\\]*(?:\\.[^\n"\\]*)*�")4rrz'''z"""zr'''zr"""zu'''zu"""zb'''zb"""zf'''zf"""zur'''zur"""zbr'''zbr"""zrb'''zrb"""zR'''zR"""zU'''zU"""zB'''zB"""zF'''zF"""zuR'''zuR"""zUr'''zUr"""zUR'''zUR"""zbR'''zbR"""zBr'''zBr"""zBR'''zBR"""zrB'''zrB"""zRb'''zRb"""zRB'''zRB"""�r�R�u�U�f�F�b�B�r'''�r"""�R'''�R"""�u'''�u"""�U'''�U"""�b'''�b"""�B'''�B"""�f'''�f"""�F'''�F"""�ur'''�ur"""�Ur'''�Ur"""�uR'''�uR"""�UR'''�UR"""�br'''�br"""�Br'''�Br"""�bR'''�bR"""�BR'''�BR"""�rb'''�rb"""�Rb'''�Rb"""�rB'''�rB"""�RB'''�RB"""�r'�r"�R'�R"�u'�u"�U'�U"�b'�b"�B'�B"�f'�f"�F'�F"�ur'�ur"�Ur'�Ur"�uR'�uR"�UR'�UR"�br'�br"�Br'�Br"�bR'�bR"�BR'�BR"�rb'�rb"�Rb'�Rb"�rB'�rB"�RB'�RB"�c@seZdZdS)�
TokenErrorN)�__name__�
__module__�__qualname__rrrrrw�srwc@seZdZdS)�StopTokenizingN)rxryrzrrrrr{�sr{c		Cs4|\}}|\}}td||||t|t|�f�dS)Nz%d,%d-%d,%d:	%s	%s)�print�tok_name�repr)	�typerZxxx_todo_changemeZxxx_todo_changeme1�lineZsrowZscolZerowZecolrrr�
printtoken�sr�cCs(yt||�Wntk
r"YnXdS)a:
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    N)�
tokenize_loopr{)�readline�
tokeneaterrrrr
�s
cCsxt|�D]}||�q
WdS)N)r)r�r�Z
token_inforrrr��sr�c@s,eZdZdd�Zdd�Zdd�Zdd�Zd	S)
�UntokenizercCsg|_d|_d|_dS)Nrr)�tokens�prev_row�prev_col)�selfrrr�__init__�szUntokenizer.__init__cCs8|\}}||jkst�||j}|r4|jjd|�dS)N� )r��AssertionErrorr�r��append)r��start�row�col�
col_offsetrrr�add_whitespace�s

zUntokenizer.add_whitespacecCs�xv|D]n}t|�dkr$|j||�P|\}}}}}|j|�|jj|�|\|_|_|ttfkr|jd7_d|_qWdj	|j�S)N�rr�)
�len�compatr�r�r�r�r��NEWLINE�NLr)r��iterable�t�tok_typerr��endr�rrrr�s


zUntokenizer.untokenizec	Cs�d}g}|jj}|\}}|ttfkr,|d7}|ttfkr<d}x�|D]�}|dd�\}}|ttttfkrn|d7}|tkr�|j|�qBn>|t	kr�|j
�qBn*|ttfkr�d}n|r�|r�||d�d}||�qBWdS)NFr�Tr�r���)r�r��NAME�NUMBERr�r��ASYNC�AWAIT�INDENT�DEDENT�pop)	r�rr��	startline�indents�toks_append�toknum�tokval�tokrrrr��s0

zUntokenizer.compatN)rxryrzr�r�rr�rrrrr��sr�z&^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)s^[ \t\f]*(?:[#\r\n]|$)cCsH|dd�j�jdd�}|dks*|jd�r.dS|d
ks@|jd�rDdS|S)z(Imitates get_normal_name in tokenizer.c.N�r�-zutf-8zutf-8-�latin-1�
iso-8859-1�iso-latin-1�latin-1-�iso-8859-1-�iso-latin-1-)r�r�r�)r�r�r�)�lower�replace�
startswith)�orig_enc�encrrr�_get_normal_names
r�cs�d�d}d}�fdd�}�fdd�}|�}|jt�rHd�|d	d�}d
}|sT|gfS||�}|rj||gfStj|�s~||gfS|�}|s�||gfS||�}|r�|||gfS|||gfS)a
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read
    in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present, but
    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
    charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    FNzutf-8cs"y��Stk
rt�SXdS)N)�
StopIteration�bytesr)r�rr�read_or_stop sz%detect_encoding.<locals>.read_or_stopcs�y|jd�}Wntk
r"dSXtj|�}|s6dSt|jd��}yt|�}Wn tk
rptd|��YnX�r�|j	dkr�td��|d7}|S)N�asciirzunknown encoding: zutf-8zencoding problem: utf-8z-sig)
�decode�UnicodeDecodeError�	cookie_re�matchr�rr�LookupError�SyntaxError�name)r��line_stringr��encoding�codec)�	bom_foundrr�find_cookie&s"

z$detect_encoding.<locals>.find_cookieT�z	utf-8-sig)r�r�blank_rer�)r�r��defaultr�r��first�secondr)r�r�r�detect_encodings0




r�cCst�}|j|�S)a�Transform tokens back into Python source code.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited intput:
        # Output text will tokenize the back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tokin generate_tokens(readline)]
        assert t1 == t2
    )r�r)r��utrrrrTsc!cs�d}}}tjdd}}d \}}d}dg}	d}
d}d}d}
�xy
|�}Wntk
rdd}YnX|d}dt|�}}|�rF|s�td|��|j|�}|r�|jd�}}t||d|�|||f||fVd!\}}d}nd|�r0|d"d�d
k�r0|d#d�dk�r0t||||t|�f|fVd}d}qBn||}||}qB�nF|dk�rt|�rt|�s`Pd}xf||k�r�||d
k�r�|d}n6||dk�r�|t	dt	}n||dk�r�d}nP|d}�qfW||k�r�P|
�r�|
Vd}
||dk�r�||dk�rh||d�j
d�}|t|�}t|||f||t|�f|fVt||d�||f|t|�f|fVqBttf||dk||d�||f|t|�f|fVqB||	d$k�r�|	j
|�t|d|�|df||f|fVxt||	d%k�rJ||	k�rtdd|||f��|	dd&�}	|�r.||	d'k�r.d}d}
d}td||f||f|fV�q�W|�r�|
�r�||	d(k�r�d}d}
d}n|�s�td|df��d}�x�||k�rJtj||�}|�r|jd�\}}||f||f|}}}|||�||}}||k�s|dk�r|dk�rt||||fV�qF|dk�rft}|dk�r8t}n
|�rBd}
|
�rR|
Vd}
|||||fV�qF|dk�r�|jd��s�t�|
�r�|
Vd}
t||||fV�qF|tk�r$t|}|j||�}|�r|jd�}|||�}|
�r�|
Vd}
t||||f|fVn||f}||d�}|}P�qF|tk�sR|dd	�tk�sR|dd�tk�r�|d)dk�r�||f}t|�p�t|d�p�t|d	}||d�d}}|}Pn |
�r�|
Vd}
t||||fV�qF||k�r�|d*k�r|�r|dk�r�tnt||||fV�q�t||||f}|dk�r.|
�r.|}
�q�|dk�r�|
�r�|
dtk�r�|
ddk�r�d}|	d+}t|
d|
d	|
d|
dfVd}
|
�r�|
Vd}
|Vnz|dk�r�|
�r�|
Vd}
t||||f|fVd}nF|dk�r�|d}n|dk�r�|d}|
�r|
Vd}
t||||fVn(t||||f||df|fV|d}�q�WqBW|
�r`|
Vd}
x.|	dd�D]} td|df|dfdfV�qnWtd|df|dfdfVdS),aT
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile).next    # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    rr�
0123456789r�NFrzEOF in multi-line stringr�z\
r�z\
r��	�z#
�#z
z3unindent does not match any outer indentation levelz
<tokenize>zEOF in multi-line statement�.T�
�async�await�def��\z([{z)]})r�r)r�r������r�r�r�r�r�r�)r�r�r�)�stringZ
ascii_lettersr�r�rwr�r��STRING�
ERRORTOKEN�tabsize�rstrip�COMMENTr�r�r��IndentationErrorr��
pseudoprog�spanr�r��endswithr��
triple_quoted�endprogs�
single_quotedr�r�r��OP�	ENDMARKER)!r��lnum�parenlev�	continuedZ	namechars�numchars�contstr�needcont�contliner��stashed�	async_def�async_def_indent�async_def_nlr��pos�max�strstart�endprog�endmatchr��column�
comment_token�nl_pos�pseudomatchr��spos�eposr�initial�newliner��indentrrrrisr



*


 














�__main__)*rrr&r'r(r)r*r+r,r-r.r/r0r1r2r3r4r5r6r7r8r9r:r;r<r=r>r?r@rArBrCrDrErFrGrHrIrJrKrLrM)*rrrNrOrPrQrRrSrTrUrVrWrXrYrZr[r\r]r^r_r`rarbrcrdrerfrgrhrirjrkrlrmrnrorprqrrrsrtru)S�__doc__�
__author__�__credits__r��re�codecsrrZlib2to3.pgen2.tokenr�r�dir�__all__r��	NameError�strrrr�
Whitespace�Comment�Ignore�Name�	Binnumber�	Hexnumber�	Octnumber�	Decnumber�	Intnumber�Exponent�
Pointfloat�Expfloat�Floatnumber�
Imagnumber�Number�Single�Double�Single3�Double3Z
_litprefix�Triple�String�Operator�Bracket�Special�Funny�
PlainToken�Token�ContStr�PseudoExtras�PseudoToken�list�map�compileZ	tokenprogr�Zsingle3progZdouble3progr�r�r�r�r��	Exceptionrwr{r�r
r�r��ASCIIr�r�r�r�rrrx�sysr��argv�openr��stdinrrrr�<module>s�







8Ic
__pycache__/literals.cpython-36.opt-1.pyc000064400000002502150467362370014174 0ustar003


 \O�@sPdZddlZddddddd	d
ddd
�
Zdd�Zdd�Zdd�ZedkrLe�dS)z<Safely evaluate Python string literals without using eval().�N����
�
�	��'�"�\)
�a�b�f�n�r�t�vr	r
rcCs�|jdd�\}}tj|�}|dk	r&|S|jd�r�|dd�}t|�dkrTtd|��yt|d�}Wq�tk
r�td|��Yq�Xn0yt|d�}Wn tk
r�td|��YnXt|�S)	Nr��x�z!invalid hex string escape ('\%s')��z#invalid octal string escape ('\%s'))�group�simple_escapes�get�
startswith�len�
ValueError�int�chr)�m�all�tail�escZhexes�i�r%�./usr/lib64/python3.6/lib2to3/pgen2/literals.py�escapes"

r'cCsH|d}|dd�|dkr$|d}|t|�t|��}tjdt|�S)Nr�z)\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3}))r�re�subr')�s�qr%r%r&�
evalString(s
r-cCsDx>td�D]2}t|�}t|�}t|�}||kr
t||||�q
WdS)N�)�ranger�reprr-�print)r$�cr+�er%r%r&�test2sr4�__main__)�__doc__r)rr'r-r4�__name__r%r%r%r&�<module>s 
	__pycache__/grammar.cpython-36.pyc000064400000015621150467362370013052 0ustar003


 \��@sxdZddlZddlZddlmZmZGdd�de�Zdd�Zd	Z	iZ
x.e	j�D]"ZerNej
�\ZZeee�e
e<qNWdS)
a�This module defines the data structures used to represent a grammar.

These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.

There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.

�N�)�token�tokenizec@s@eZdZdZdd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Z	dS)�Grammara�	Pgen parsing tables conversion class.

    Once initialized, this class supplies the grammar tables for the
    parsing engine implemented by parse.py.  The parsing engine
    accesses the instance variables directly.  The class here does not
    provide initialization of the tables; several subclasses exist to
    do this (see the conv and pgen modules).

    The load() method reads the tables from a pickle file, which is
    much faster than the other ways offered by subclasses.  The pickle
    file is written by calling dump() (after loading the grammar
    tables using a subclass).  The report() method prints a readable
    representation of the tables to stdout, for debugging.

    The instance variables are as follows:

    symbol2number -- a dict mapping symbol names to numbers.  Symbol
                     numbers are always 256 or higher, to distinguish
                     them from token numbers, which are between 0 and
                     255 (inclusive).

    number2symbol -- a dict mapping numbers to symbol names;
                     these two are each other's inverse.

    states        -- a list of DFAs, where each DFA is a list of
                     states, each state is a list of arcs, and each
                     arc is a (i, j) pair where i is a label and j is
                     a state number.  The DFA number is the index into
                     this list.  (This name is slightly confusing.)
                     Final states are represented by a special arc of
                     the form (0, j) where j is its own state number.

    dfas          -- a dict mapping symbol numbers to (DFA, first)
                     pairs, where DFA is an item from the states list
                     above, and first is a set of tokens that can
                     begin this grammar rule (represented by a dict
                     whose values are always 1).

    labels        -- a list of (x, y) pairs where x is either a token
                     number or a symbol number, and y is either None
                     or a string; the strings are keywords.  The label
                     number is the index in this list; label numbers
                     are used to mark state transitions (arcs) in the
                     DFAs.

    start         -- the number of the grammar's start symbol.

    keywords      -- a dict mapping keyword strings to arc labels.

    tokens        -- a dict mapping token numbers to arc labels.

    cCs<i|_i|_g|_i|_dg|_i|_i|_i|_d|_dS)Nr�EMPTY�)rr)	�
symbol2number�
number2symbol�states�dfas�labels�keywords�tokens�symbol2label�start)�self�r�-/usr/lib64/python3.6/lib2to3/pgen2/grammar.py�__init__MszGrammar.__init__cCs2t|d��}t|j�}tj||d�WdQRXdS)a�Dump the grammar tables to a pickle file.

        dump() recursively changes all dict to OrderedDict, so the pickled file
        is not exactly the same as what was passed in to dump(). load() uses the
        pickled file to create the tables, but  only changes OrderedDict to dict
        at the top level; it does not recursively change OrderedDict to dict.
        So, the loaded tables are different from the original tables that were
        passed to load() in that some of the OrderedDict (from the pickled file)
        are not changed back to dict. For parsing, this has no effect on
        performance because OrderedDict uses dict's __getitem__ with nothing in
        between.
        �wb�N)�open�_make_deterministic�__dict__�pickle�dump)r�filename�f�drrrrXs

zGrammar.dumpc	Cs0t|d��}tj|�}WdQRX|jj|�dS)z+Load the grammar tables from a pickle file.�rbN)rr�loadr�update)rrrrrrrr iszGrammar.loadcCs|jjtj|��dS)z3Load the grammar tables from a pickle bytes object.N)rr!r�loads)rZpklrrrr"osz
Grammar.loadscCsX|j�}x"dD]}t||t||�j��qW|jdd�|_|jdd�|_|j|_|S)	z#
        Copy the grammar.
        rr	rr
rrN)rr	rr
rr)�	__class__�setattr�getattr�copyrr
r)r�newZ	dict_attrrrrr&sszGrammar.copycCsvddlm}td�||j�td�||j�td�||j�td�||j�td�||j�td|j�d	S)
z:Dump the grammar tables to standard output, for debugging.r)�pprintZs2nZn2sr
rrrN)r(�printrr	r
rrr)rr(rrr�report�s




zGrammar.reportN)
�__name__�
__module__�__qualname__�__doc__rrr r"r&r*rrrrrs4
rcCs^t|t�r&tjtdd�|j�D���St|t�r>dd�|D�St|t�rZtdd�|D��S|S)Ncss|]\}}|t|�fVqdS)N)r)�.0�k�vrrr�	<genexpr>�sz&_make_deterministic.<locals>.<genexpr>cSsg|]}t|��qSr)r)r/�errr�
<listcomp>�sz'_make_deterministic.<locals>.<listcomp>css|]}t|�VqdS)N)r)r/r3rrrr2�s)�
isinstance�dict�collections�OrderedDict�sorted�items�list�tuple)�toprrrr�s


ra
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
@= ATEQUAL
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
)r.r7r�rr�objectrrZ	opmap_rawZopmap�
splitlines�line�split�op�namer%rrrr�<module>
sy=__pycache__/tokenize.cpython-36.opt-1.pyc000064400000035722150467362370014217 0ustar003


 \NX�=@s�dZdZdZddlZddlZddlmZmZddlTddl	m
Z
d	d
�ee
�D�ddd
gZ[
ye
Wnek
r~eZ
YnXdd�Zdd�Zdd�ZdZdZeede�ee�ZdZdZdZdZedd�Zeeeee�ZdZedd�ee�Zd eZeee�Zed!ed"�Z ee ee�Z!d#Z"d$Z#d%Z$d&Z%d'Z&ee&d(e&d)�Z'ee&d*e&d+�Z(ed,d-d.d/d0d1d2d3d4�	Z)d5Z*ed6d7�Z+ee)e*e+�Z,ee!e,e(e�Z-ee-Z.ee&d8ed9d�e&d:ed;d��Z/edee'�Z0eee0e!e,e/e�Z1e2e3ej4e.e1e$e%f��\Z5Z6Z7Z8ej4e"�ej4e#�e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8e7e8ddddddddd<�4Z9iZ:xd�D]Z;e;e:e;<�q�WiZ<xd�D]Z;e;e<e;<�q�Wd�Z=Gd�d��d�e>�Z?Gd�d��d�e>�Z@d�d��ZAeAfd�d�ZBd�d��ZCGd�d��d��ZDej4d�ejE�ZFej4d�ejE�ZGd�d��ZHd�d��ZId�d
�ZJd�d�ZKeLd�k�r�ddlMZMeNeMjO�dk�r|eBePeMjOd�jQ�neBeMjRjQ�dS)�a�Tokenization help for Python programs.

generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens.  It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF).  It generates
5-tuples with these members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators

Older entry points
    tokenize_loop(readline, tokeneater)
    tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found.zKa-Ping Yee <ping@lfw.org>z@GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro�N)�BOM_UTF8�lookup)�*�)�tokencCsg|]}|ddkr|�qS)r�_�)�.0�xrr�./usr/lib64/python3.6/lib2to3/pgen2/tokenize.py�
<listcomp>%sr�tokenize�generate_tokens�
untokenizecGsddj|�dS)N�(�|�))�join)�choicesrrr�group0srcGst|�dS)Nr)r)rrrr�any1srcGst|�dS)N�?)r)rrrr�maybe2srz[ \f\t]*z	#[^\r\n]*z\\\r?\nz[a-zA-Z_]\w*z0[bB]_?[01]+(?:_[01]+)*z(0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?z0[oO]?_?[0-7]+(?:_[0-7]+)*[lL]?z[1-9]\d*(?:_\d+)*[lL]?z0[lL]?z[eE][-+]?\d+(?:_\d+)*z\d+(?:_\d+)*\.(?:\d+(?:_\d+)*)?z\.\d+(?:_\d+)*z\d+(?:_\d+)*z\d+(?:_\d+)*[jJ]z[jJ]z[^'\\]*(?:\\.[^'\\]*)*'z[^"\\]*(?:\\.[^"\\]*)*"z%[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''z%[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""z#(?:[uUrRbBfF]|[rR][bB]|[bBuU][rR])?�'''�"""z'[^\n'\\]*(?:\\.[^\n'\\]*)*'z"[^\n"\\]*(?:\\.[^\n"\\]*)*"z\*\*=?z>>=?z<<=?z<>z!=z//=?z->z[+\-*/%&@|^=<>]=?�~z[][(){}]z\r?\nz[:;.,`@]z'[^\n'\\]*(?:\\.[^\n'\\]*)*�'z"[^\n"\\]*(?:\\.[^\n"\\]*)*�")4rrz'''z"""zr'''zr"""zu'''zu"""zb'''zb"""zf'''zf"""zur'''zur"""zbr'''zbr"""zrb'''zrb"""zR'''zR"""zU'''zU"""zB'''zB"""zF'''zF"""zuR'''zuR"""zUr'''zUr"""zUR'''zUR"""zbR'''zbR"""zBr'''zBr"""zBR'''zBR"""zrB'''zrB"""zRb'''zRb"""zRB'''zRB"""�r�R�u�U�f�F�b�B�r'''�r"""�R'''�R"""�u'''�u"""�U'''�U"""�b'''�b"""�B'''�B"""�f'''�f"""�F'''�F"""�ur'''�ur"""�Ur'''�Ur"""�uR'''�uR"""�UR'''�UR"""�br'''�br"""�Br'''�Br"""�bR'''�bR"""�BR'''�BR"""�rb'''�rb"""�Rb'''�Rb"""�rB'''�rB"""�RB'''�RB"""�r'�r"�R'�R"�u'�u"�U'�U"�b'�b"�B'�B"�f'�f"�F'�F"�ur'�ur"�Ur'�Ur"�uR'�uR"�UR'�UR"�br'�br"�Br'�Br"�bR'�bR"�BR'�BR"�rb'�rb"�Rb'�Rb"�rB'�rB"�RB'�RB"�c@seZdZdS)�
TokenErrorN)�__name__�
__module__�__qualname__rrrrrw�srwc@seZdZdS)�StopTokenizingN)rxryrzrrrrr{�sr{c		Cs4|\}}|\}}td||||t|t|�f�dS)Nz%d,%d-%d,%d:	%s	%s)�print�tok_name�repr)	�typerZxxx_todo_changemeZxxx_todo_changeme1�lineZsrowZscolZerowZecolrrr�
printtoken�sr�cCs(yt||�Wntk
r"YnXdS)a:
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    N)�
tokenize_loopr{)�readline�
tokeneaterrrrr
�s
cCsxt|�D]}||�q
WdS)N)r)r�r�Z
token_inforrrr��sr�c@s,eZdZdd�Zdd�Zdd�Zdd�Zd	S)
�UntokenizercCsg|_d|_d|_dS)Nrr)�tokens�prev_row�prev_col)�selfrrr�__init__�szUntokenizer.__init__cCs*|\}}||j}|r&|jjd|�dS)N� )r�r��append)r��start�row�col�
col_offsetrrr�add_whitespace�s
zUntokenizer.add_whitespacecCs�xv|D]n}t|�dkr$|j||�P|\}}}}}|j|�|jj|�|\|_|_|ttfkr|jd7_d|_qWdj	|j�S)N�rr�)
�len�compatr�r�r�r�r��NEWLINE�NLr)r��iterable�t�tok_typerr��endr�rrrr�s


zUntokenizer.untokenizec	Cs�d}g}|jj}|\}}|ttfkr,|d7}|ttfkr<d}x�|D]�}|dd�\}}|ttttfkrn|d7}|tkr�|j|�qBn>|t	kr�|j
�qBn*|ttfkr�d}n|r�|r�||d�d}||�qBWdS)NFr�Tr�r���)r�r��NAME�NUMBERr�r��ASYNC�AWAIT�INDENT�DEDENT�pop)	r�rr��	startline�indents�toks_append�toknum�tokval�tokrrrr��s0

zUntokenizer.compatN)rxryrzr�r�rr�rrrrr��sr�z&^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)s^[ \t\f]*(?:[#\r\n]|$)cCsH|dd�j�jdd�}|dks*|jd�r.dS|d
ks@|jd�rDdS|S)z(Imitates get_normal_name in tokenizer.c.N�r�-zutf-8zutf-8-�latin-1�
iso-8859-1�iso-latin-1�latin-1-�iso-8859-1-�iso-latin-1-)r�r�r�)r�r�r�)�lower�replace�
startswith)�orig_enc�encrrr�_get_normal_names
r�cs�d�d}d}�fdd�}�fdd�}|�}|jt�rHd�|d	d�}d
}|sT|gfS||�}|rj||gfStj|�s~||gfS|�}|s�||gfS||�}|r�|||gfS|||gfS)a
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read
    in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present, but
    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
    charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    FNzutf-8cs"y��Stk
rt�SXdS)N)�
StopIteration�bytesr)r�rr�read_or_stop sz%detect_encoding.<locals>.read_or_stopcs�y|jd�}Wntk
r"dSXtj|�}|s6dSt|jd��}yt|�}Wn tk
rptd|��YnX�r�|j	dkr�td��|d7}|S)N�asciirzunknown encoding: zutf-8zencoding problem: utf-8z-sig)
�decode�UnicodeDecodeError�	cookie_re�matchr�rr�LookupError�SyntaxError�name)r��line_stringr��encoding�codec)�	bom_foundrr�find_cookie&s"

z$detect_encoding.<locals>.find_cookieT�z	utf-8-sig)r�r�blank_rer�)r�r��defaultr�r��first�secondr)r�r�r�detect_encodings0




r�cCst�}|j|�S)a�Transform tokens back into Python source code.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited intput:
        # Output text will tokenize the back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tokin generate_tokens(readline)]
        assert t1 == t2
    )r�r)r��utrrrrTsc!cs�d}}}tjdd}}d \}}d}dg}	d}
d}d}d}
�x�y
|�}Wntk
rdd}YnX|d}dt|�}}|�rF|s�td|��|j|�}|r�|jd�}}t||d|�|||f||fVd!\}}d}nd|�r0|d"d�d
k�r0|d#d�dk�r0t||||t|�f|fVd}d}qBn||}||}qB�nF|dk�rt|�rt|�s`Pd}xf||k�r�||d
k�r�|d}n6||dk�r�|t	dt	}n||dk�r�d}nP|d}�qfW||k�r�P|
�r�|
Vd}
||dk�r�||dk�rh||d�j
d�}|t|�}t|||f||t|�f|fVt||d�||f|t|�f|fVqBttf||dk||d�||f|t|�f|fVqB||	d$k�r�|	j
|�t|d|�|df||f|fVxt||	d%k�rJ||	k�rtdd|||f��|	dd&�}	|�r.||	d'k�r.d}d}
d}td||f||f|fV�q�W|�r�|
�r�||	d(k�r�d}d}
d}n|�s�td|df��d}�x�||k�r8tj||�}|�r|jd�\}}||f||f|}}}|||�||}}||k�s|dk�r|dk�rt||||fV�q4|dk�rft}|dk�r8t}n
|�rBd}
|
�rR|
Vd}
|||||fV�q4|dk�r�|
�r�|
Vd}
t||||fV�q4|tk�rt|}|j||�}|�r�|jd�}|||�}|
�r�|
Vd}
t||||f|fVn||f}||d�}|}P�q4|tk�s@|dd	�tk�s@|dd�tk�r�|d)dk�r�||f}t|�pxt|d�pxt|d	}||d�d}}|}Pn |
�r�|
Vd}
t||||fV�q4||k�r�|d*k�r�|�r�|dk�r�tnt||||fV�q�t||||f}|dk�r|
�r|}
�q�|dk�rx|
�rx|
dtk�rx|
ddk�rxd}|	d+}t|
d|
d	|
d|
dfVd}
|
�r�|
Vd}
|Vnz|dk�r�|
�r�|
Vd}
t||||f|fVd}nF|dk�r�|d}n|dk�r�|d}|
�r�|
Vd}
t||||fVn(t||||f||df|fV|d}�q�WqBW|
�rN|
Vd}
x.|	dd�D]} td|df|dfdfV�q\Wtd|df|dfdfVdS),aT
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object that provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.  Alternatively, readline
    can be a callable that signals the end of input by raising StopIteration:
        readline = open(myfile).__next__    # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
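    For example, the statement "x = 1" yields tokens like these (tok_name
    comes from the sibling token module; output shown as comments):

        import io
        from lib2to3.pgen2 import token
        from lib2to3.pgen2.tokenize import generate_tokens

        for tok_type, tok_str, start, end, line in generate_tokens(
                io.StringIO("x = 1\n").readline):
            print(token.tok_name[tok_type], repr(tok_str), start, end)
        # NAME 'x' (1, 0) (1, 1)
        # OP '=' (1, 2) (1, 3)
        # NUMBER '1' (1, 4) (1, 5)
        # NEWLINE '\n' (1, 5) (1, 6)
        # ENDMARKER '' (2, 0) (2, 0)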
    rr�
0123456789r�NFrzEOF in multi-line stringr�z\
r�z\
r��	�z#
�#z
z3unindent does not match any outer indentation levelz
<tokenize>zEOF in multi-line statement�.T�
�async�await�def��\z([{z)]})r�r)r�r������r�r�r�r�r�r�)r�r�r�)�stringZ
ascii_lettersr�r�rwr�r��STRING�
ERRORTOKEN�tabsize�rstrip�COMMENTr�r�r��IndentationErrorr��
pseudoprog�spanr�r��
triple_quoted�endprogs�
single_quotedr�r�r��OP�	ENDMARKER)!r��lnum�parenlev�	continuedZ	namechars�numchars�contstr�needcont�contliner��stashed�	async_def�async_def_indent�async_def_nlr��pos�max�strstart�endprog�endmatchr��column�
comment_token�nl_pos�pseudomatchr��spos�eposr�initial�newliner��indentrrrrisp



*


 














�__main__)*rrr&r'r(r)r*r+r,r-r.r/r0r1r2r3r4r5r6r7r8r9r:r;r<r=r>r?r@rArBrCrDrErFrGrHrIrJrKrLrM)*rrrNrOrPrQrRrSrTrUrVrWrXrYrZr[r\r]r^r_r`rarbrcrdrerfrgrhrirjrkrlrmrnrorprqrrrsrtru)S�__doc__�
__author__�__credits__r��re�codecsrrZlib2to3.pgen2.tokenr�r�dir�__all__r��	NameError�strrrr�
Whitespace�Comment�Ignore�Name�	Binnumber�	Hexnumber�	Octnumber�	Decnumber�	Intnumber�Exponent�
Pointfloat�Expfloat�Floatnumber�
Imagnumber�Number�Single�Double�Single3�Double3Z
_litprefix�Triple�String�Operator�Bracket�Special�Funny�
PlainToken�Token�ContStr�PseudoExtras�PseudoToken�list�map�compileZ	tokenprogr�Zsingle3progZdouble3progr�r�r�r�r��	Exceptionrwr{r�r
r�r��ASCIIr�r�r�r�rrrx�sysr��argv�openr��stdinrrrr�<module>s�







8Ic
__pycache__/conv.cpython-36.pyc000064400000015570150467362370012374 0ustar003


 \�%�@s2dZddlZddlmZmZGdd�dej�ZdS)a�Convert graminit.[ch] spit out by pgen to Python code.

Pgen is the Python parser generator.  It is useful to quickly create a
parser from a grammar file in Python's grammar notation.  But I don't
want my parsers to be written in C (yet), so I'm translating the
parsing tables to Python data structures and writing a Python parse
engine.

Note that the token numbers are constants determined by the standard
Python tokenizer.  The standard token module defines these numbers and
their names (the names are not used much).  The token numbers are
hardcoded into the Python tokenizer and into pgen.  A Python
implementation of the Python tokenizer is also available, in the
standard tokenize module.

On the other hand, symbol numbers (representing the grammar's
non-terminals) are assigned by pgen based on the actual grammar
input.

Note: this module is pretty much obsolete; the pgen module generates
equivalent grammar tables directly from the Grammar.txt input file
without having to invoke the Python pgen C program.
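A hypothetical sketch of the legacy path this module implements (the two
file names are assumed to be pgen's C output in the current directory):

    from lib2to3.pgen2.conv import Converter

    c = Converter()
    c.run("graminit.h", "graminit.c")  # fills symbol2number, dfas, labels, ...
    print(len(c.dfas), "nonterminals converted")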

�N)�grammar�tokenc@s0eZdZdZdd�Zdd�Zdd�Zdd	�Zd
S)�	Convertera2Grammar subclass that reads classic pgen output files.

    The run() method reads the tables as produced by the pgen parser
    generator, typically contained in two C files, graminit.h and
    graminit.c.  The other methods are for internal use only.

    See the base class for more documentation.

    cCs |j|�|j|�|j�dS)z<Load the grammar tables from the text files written by pgen.N)�parse_graminit_h�parse_graminit_c�
finish_off)�selfZ
graminit_hZ
graminit_c�r	�*/usr/lib64/python3.6/lib2to3/pgen2/conv.py�run/s

z
Converter.runc	Cs�yt|�}Wn0tk
r<}ztd||f�dSd}~XnXi|_i|_d}x�|D]�}|d7}tjd|�}|r�|j�r�td|||j�f�qT|j�\}}t	|�}||jks�t
�||jks�t
�||j|<||j|<qTWdS)	z�Parse the .h file written by pgen.  (Internal)

        This file is a sequence of #define statements defining the
        nonterminals of the grammar as numbers.  We build two tables
        mapping the numbers to names and back.

        zCan't open %s: %sFNr�z^#define\s+(\w+)\s+(\d+)$z%s(%s): can't parse %sT)�open�OSError�print�
symbol2number�
number2symbol�re�match�strip�groups�int�AssertionError)	r�filename�f�err�lineno�line�mo�symbol�numberr	r	r
r5s*

zConverter.parse_graminit_hc!Cspyt|�}Wn0tk
r<}ztd||f�dSd}~XnXd}|dt|�}}|dksht||f��|dt|�}}|dks�t||f��|dt|�}}i}g}�x*|jd��r�x�|jd��r�tjd	|�}|s�t||f��tt	t
|j���\}	}
}g}xft|�D]Z}
|dt|�}}tjd
|�}|�s<t||f��tt	t
|j���\}}|j
||f��qW|dt|�}}|dk�s�t||f��|||	|
f<|dt|�}}q�Wtjd|�}|�s�t||f��tt	t
|j���\}}|t|�k�s�t||f��g}x�t|�D]~}
|dt|�}}tjd
|�}|�s@t||f��tt	t
|j���\}}	}
||	|
f}|t|�k�s~t||f��|j
|��qW|j
|�|dt|�}}|dk�s�t||f��|dt|�}}q�W||_i}tjd|�}|�s�t||f��t
|jd��}�x�t|�D�]r}|dt|�}}tjd|�}|�sNt||f��|jd�}tt	t
|jdddd���\}}}}|j||k�s�t||f��|j||k�s�t||f��|dk�s�t||f��||}|t|�k�s�t||f��|dt|�}}tjd|�}|�st||f��i}t|jd��}xPt|�D]D\}}t|�}x0td�D]$}|d|>@�rPd||d|<�qPW�q6W||f||<�qW|dt|�}}|dk�s�t||f��||_g}|dt|�}}tjd|�}|�s�t||f��t
|jd��}x|t|�D]p}|dt|�}}tjd|�}|�s>t||f��|j�\}}t
|�}|dk�rbd}nt|�}|j
||f��q
W|dt|�}}|dk�s�t||f��||_|dt|�}}|dk�s�t||f��|dt|�}}tjd|�}|�st||f��t
|jd��}|t|j�k�s&t�|dt|�}}|dk�sNt||f��|dt|�}}tjd|�}|�s~t||f��t
|jd��}|t|j�k�s�t||f��|dt|�}}tjd|�}|�s�t||f��t
|jd��} | |jk�s�t||f��| |_|dt|�}}|dk�s,t||f��y|dt|�}}Wntk
�rXYnXd�slt||f��dS)a�Parse the .c file written by pgen.  (Internal)

        The file looks as follows.  The first two lines are always this:

        #include "pgenheaders.h"
        #include "grammar.h"

        After that come four blocks:

        1) one or more state definitions
        2) a table defining dfas
        3) a table defining labels
        4) a struct defining the grammar

        A state definition has the following form:
        - one or more arc arrays, each of the form:
          static arc arcs_<n>_<m>[<k>] = {
                  {<i>, <j>},
                  ...
          };
        - followed by a state array, of the form:
          static state states_<s>[<t>] = {
                  {<k>, arcs_<n>_<m>},
                  ...
          };
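        As a sketch, the arc-array header in that format is matched with the
        same kind of regular expression this parser uses:

            import re

            mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
                          "static arc arcs_0_0[3] = {")
            n, m, k = map(int, mo.groups())  # array ids n, m; k arcs follow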

        zCan't open %s: %sFNrrz#include "pgenheaders.h"
z#include "grammar.h"
zstatic arc z)static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$z\s+{(\d+), (\d+)},$z};
z'static state states_(\d+)\[(\d+)\] = {$z\s+{(\d+), arcs_(\d+)_(\d+)},$zstatic dfa dfas\[(\d+)\] = {$z0\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$����z\s+("(?:\\\d\d\d)*")},$�z!static label labels\[(\d+)\] = {$z\s+{(\d+), (0|"\w+")},$�0zgrammar _PyParser_Grammar = {
z
\s+(\d+),$z	dfas,
z\s+{(\d+), labels},$z	\s+(\d+)$)r
rr�nextr�
startswithrr�list�maprr�range�append�len�states�grouprr�eval�	enumerate�ord�dfas�labels�start�
StopIteration)!rrrrrrZallarcsr-r�n�m�kZarcs�_�i�j�s�t�stater2Zndfasrr�x�y�z�firstZ	rawbitset�cZbyter3Znlabelsr4r	r	r
rTs�

"
zConverter.parse_graminit_ccCs\i|_i|_xJt|j�D]<\}\}}|tjkrB|dk	rB||j|<q|dkr||j|<qWdS)z1Create additional useful structures.  (Internal).N)�keywords�tokensr0r3r�NAME)rZilabel�type�valuer	r	r
r�szConverter.finish_offN)�__name__�
__module__�__qualname__�__doc__rrrrr	r	r	r
r$s	&r)rLrZpgen2rrZGrammarrr	r	r	r
�<module>s__pycache__/driver.cpython-36.opt-1.pyc000064400000011730150467362370013653 0ustar003


 \��@s�dZdZddgZddlZddlZddlZddlZddlZddlZddl	m
Z
mZmZm
Z
mZGdd�de�Zd	d
�Zddd�Zdd�Zdd�Zdd�Zedkr�ejee���dS)zZParser driver.

This provides a high-level interface to parse a file into a syntax tree.
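A minimal sketch of that interface, assuming lib2to3's bundled Grammar.txt
is available on disk next to the package:

    import os
    import lib2to3
    from lib2to3.pgen2 import driver

    gt = os.path.join(os.path.dirname(lib2to3.__file__), "Grammar.txt")
    d = driver.Driver(driver.load_grammar(gt))
    tree = d.parse_string("x = 1\n")   # concrete syntax tree (no converter)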

z#Guido van Rossum <guido@python.org>�Driver�load_grammar�N�)�grammar�parse�token�tokenize�pgenc@sHeZdZddd�Zddd�Zddd�Zdd	d
�Zddd�Zdd
d�ZdS)rNcCs&||_|dkrtj�}||_||_dS)N)r�logging�	getLogger�logger�convert)�selfrr
r�r�,/usr/lib64/python3.6/lib2to3/pgen2/driver.py�__init__ s
zDriver.__init__FcCsvtj|j|j�}|j�d}d}d}}}}	}
d}�x4|D�]}|\}}}}	}
|||fkr�|\}
}||
kr�|d|
|7}|
}d}||kr�||
||�7}|}|tjtjfkr�||7}|	\}}|jd�r@|d7}d}q@|t	j
kr�tj|}|�r|jj
dt	j|||�|j||||f��r6|�r4|jj
d�Pd}|	\}}|jd�r@|d7}d}q@Wtjd||||f��|jS)	z4Parse a series of tokens and return the syntax tree.rrN��
z%s %r (prefix=%r)zStop.zincomplete input)rZParserrr
Zsetupr�COMMENT�NL�endswithr�OPZopmapr�debug�tok_nameZaddtokenZ
ParseErrorZrootnode)r�tokensr�p�lineno�column�type�value�start�endZ	line_text�prefixZ	quintupleZs_linenoZs_columnrrr�parse_tokens'sR



zDriver.parse_tokenscCstj|j�}|j||�S)z*Parse a stream and return the syntax tree.)r�generate_tokens�readliner#)r�streamrrrrr�parse_stream_rawWszDriver.parse_stream_rawcCs|j||�S)z*Parse a stream and return the syntax tree.)r')rr&rrrr�parse_stream\szDriver.parse_streamc
Cs*tj|d|�}z|j||�S|j�XdS)z(Parse a file and return the syntax tree.�rN)�codecs�openr(�close)r�filename�encodingrr&rrr�
parse_file`szDriver.parse_filecCstjtj|�j�}|j||�S)z*Parse a string and return the syntax tree.)rr$�io�StringIOr%r#)r�textrrrrr�parse_stringhszDriver.parse_string)NN)F)F)F)NF)F)	�__name__�
__module__�__qualname__rr#r'r(r/r3rrrrrs

0


cCs:tjj|�\}}|dkrd}||djtttj��dS)Nz.txtr�.z.pickle)�os�path�splitext�join�map�str�sys�version_info)�gt�head�tailrrr�_generate_pickle_namensrC�Grammar.txtTFcCs�|dkrtj�}|dkr t|�n|}|s4t||�r�|jd|�tj|�}|r�|jd|�y|j|�Wq�tk
r�}z|jd|�WYdd}~Xq�Xnt	j
�}|j|�|S)z'Load the grammar (maybe from a pickle).Nz!Generating grammar tables from %szWriting grammar tables to %szWriting failed: %s)r
rrC�_newer�infor	Zgenerate_grammar�dump�OSErrorr�Grammar�load)r@Zgp�save�forcer�g�errrrus
 
cCs8tjj|�sdStjj|�s dStjj|�tjj|�kS)z0Inquire whether file a was written since file b.FT)r8r9�exists�getmtime)�a�brrrrE�s
rEcCsFtjj|�rt|�Sttjj|��}tj||�}tj	�}|j
|�|S)a�Normally, loads a pickled grammar by doing
        pkgutil.get_data(package, pickled_grammar)
    where *pickled_grammar* is computed from *grammar_source* by adding the
    Python version and using a ``.pickle`` extension.

    However, if *grammar_source* is an extant file, load_grammar(grammar_source)
    is called instead. This facilitates using a packaged grammar file when needed
    but preserves load_grammar's automatic regeneration behavior when possible.
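    Schematically (the package name and path are hypothetical):

        from lib2to3.pgen2.driver import load_packaged_grammar

        # Uses load_grammar() when the .txt file exists on disk; otherwise
        # reads the pickled tables via pkgutil.get_data().
        g = load_packaged_grammar("mypkg", "mypkg/Grammar.txt")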

    )r8r9�isfilerrC�basename�pkgutil�get_datarrI�loads)�packageZgrammar_sourceZpickled_name�datarMrrr�load_packaged_grammar�s
rZcGsF|stjdd�}tjtjtjdd�x|D]}t|ddd�q,WdS)z�Main program, when run as a script: produce grammar pickle files.

    Calls load_grammar for each argument, a path to a grammar text file.
    rNz%(message)s)�levelr&�formatT)rKrL)r>�argvr
ZbasicConfig�INFO�stdoutr)�argsr@rrr�main�s
ra�__main__)rDNTFN)�__doc__�
__author__�__all__r*r0r8r
rUr>rrrrrr	�objectrrCrrErZrar4�exit�intrrrr�<module>s$P
	
__pycache__/tokenize.cpython-36.opt-2.pyc000064400000026167150467362370014223 0ustar003


 \NX�=@s�dZdZddlZddlZddlmZmZddlTddlm	Z	dd	�e
e	�D�d
ddgZ[	yeWne
k
rzeZYnXd
d�Zdd�Zdd�ZdZdZeede�ee�ZdZdZdZdZedd�Zeeeee�ZdZedd�ee�ZdeZeee�Zed ed!�Zeeee�Z d"Z!d#Z"d$Z#d%Z$d&Z%ee%d'e%d(�Z&ee%d)e%d*�Z'ed+d,d-d.d/d0d1d2d3�	Z(d4Z)ed5d6�Z*ee(e)e*�Z+ee e+e'e�Z,ee,Z-ee%d7ed8d�e%d9ed:d��Z.edee&�Z/eee/e e+e.e�Z0e1e2ej3e-e0e#e$f��\Z4Z5Z6Z7ej3e!�ej3e"�e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7e6e7ddddddddd;�4Z8iZ9xd�D]Z:e:e9e:<�q�WiZ;xd�D]Z:e:e;e:<�q�Wd�Z<Gd�d��d�e=�Z>Gd�d��d�e=�Z?d�d��Z@e@fd�d
�ZAd�d��ZBGd�d��d��ZCej3d�ejD�ZEej3d�ejD�ZFd�d��ZGd�d��ZHd�d�ZId�d�ZJeKd�k�r�ddlLZLeMeLjN�dk�rxeAeOeLjNd�jP�neAeLjQjP�dS)�zKa-Ping Yee <ping@lfw.org>z@GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro�N)�BOM_UTF8�lookup)�*�)�tokencCsg|]}|ddkr|�qS)r�_�)�.0�xrr�./usr/lib64/python3.6/lib2to3/pgen2/tokenize.py�
<listcomp>%sr�tokenize�generate_tokens�
untokenizecGsddj|�dS)N�(�|�))�join)�choicesrrr�group0srcGst|�dS)Nr)r)rrrr�any1srcGst|�dS)N�?)r)rrrr�maybe2srz[ \f\t]*z	#[^\r\n]*z\\\r?\nz[a-zA-Z_]\w*z0[bB]_?[01]+(?:_[01]+)*z(0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?z0[oO]?_?[0-7]+(?:_[0-7]+)*[lL]?z[1-9]\d*(?:_\d+)*[lL]?z0[lL]?z[eE][-+]?\d+(?:_\d+)*z\d+(?:_\d+)*\.(?:\d+(?:_\d+)*)?z\.\d+(?:_\d+)*z\d+(?:_\d+)*z\d+(?:_\d+)*[jJ]z[jJ]z[^'\\]*(?:\\.[^'\\]*)*'z[^"\\]*(?:\\.[^"\\]*)*"z%[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''z%[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""z#(?:[uUrRbBfF]|[rR][bB]|[bBuU][rR])?�'''�"""z'[^\n'\\]*(?:\\.[^\n'\\]*)*'z"[^\n"\\]*(?:\\.[^\n"\\]*)*"z\*\*=?z>>=?z<<=?z<>z!=z//=?z->z[+\-*/%&@|^=<>]=?�~z[][(){}]z\r?\nz[:;.,`@]z'[^\n'\\]*(?:\\.[^\n'\\]*)*�'z"[^\n"\\]*(?:\\.[^\n"\\]*)*�")4rrz'''z"""zr'''zr"""zu'''zu"""zb'''zb"""zf'''zf"""zur'''zur"""zbr'''zbr"""zrb'''zrb"""zR'''zR"""zU'''zU"""zB'''zB"""zF'''zF"""zuR'''zuR"""zUr'''zUr"""zUR'''zUR"""zbR'''zbR"""zBr'''zBr"""zBR'''zBR"""zrB'''zrB"""zRb'''zRb"""zRB'''zRB"""�r�R�u�U�f�F�b�B�r'''�r"""�R'''�R"""�u'''�u"""�U'''�U"""�b'''�b"""�B'''�B"""�f'''�f"""�F'''�F"""�ur'''�ur"""�Ur'''�Ur"""�uR'''�uR"""�UR'''�UR"""�br'''�br"""�Br'''�Br"""�bR'''�bR"""�BR'''�BR"""�rb'''�rb"""�Rb'''�Rb"""�rB'''�rB"""�RB'''�RB"""�r'�r"�R'�R"�u'�u"�U'�U"�b'�b"�B'�B"�f'�f"�F'�F"�ur'�ur"�Ur'�Ur"�uR'�uR"�UR'�UR"�br'�br"�Br'�Br"�bR'�bR"�BR'�BR"�rb'�rb"�Rb'�Rb"�rB'�rB"�RB'�RB"�c@seZdZdS)�
TokenErrorN)�__name__�
__module__�__qualname__rrrrrw�srwc@seZdZdS)�StopTokenizingN)rxryrzrrrrr{�sr{c		Cs4|\}}|\}}td||||t|t|�f�dS)Nz%d,%d-%d,%d:	%s	%s)�print�tok_name�repr)	�typerZxxx_todo_changemeZxxx_todo_changeme1�lineZsrowZscolZerowZecolrrr�
printtoken�sr�cCs(yt||�Wntk
r"YnXdS)N)�
tokenize_loopr{)�readline�
tokeneaterrrrr
�s
cCsxt|�D]}||�q
WdS)N)r)r�r�Z
token_inforrrr��sr�c@s,eZdZdd�Zdd�Zdd�Zdd�Zd	S)
�UntokenizercCsg|_d|_d|_dS)Nrr)�tokens�prev_row�prev_col)�selfrrr�__init__�szUntokenizer.__init__cCs*|\}}||j}|r&|jjd|�dS)N� )r�r��append)r��start�row�col�
col_offsetrrr�add_whitespace�s
zUntokenizer.add_whitespacecCs�xv|D]n}t|�dkr$|j||�P|\}}}}}|j|�|jj|�|\|_|_|ttfkr|jd7_d|_qWdj	|j�S)N�rr�)
�len�compatr�r�r�r�r��NEWLINE�NLr)r��iterable�t�tok_typerr��endr�rrrr�s


zUntokenizer.untokenizec	Cs�d}g}|jj}|\}}|ttfkr,|d7}|ttfkr<d}x�|D]�}|dd�\}}|ttttfkrn|d7}|tkr�|j|�qBn>|t	kr�|j
�qBn*|ttfkr�d}n|r�|r�||d�d}||�qBWdS)NFr�Tr�r���)r�r��NAME�NUMBERr�r��ASYNC�AWAIT�INDENT�DEDENT�pop)	r�rr��	startline�indents�toks_append�toknum�tokval�tokrrrr��s0

zUntokenizer.compatN)rxryrzr�r�rr�rrrrr��sr�z&^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)s^[ \t\f]*(?:[#\r\n]|$)cCsH|dd�j�jdd�}|dks*|jd�r.dS|dks@|jd
�rDdS|S)N�r�-zutf-8zutf-8-�latin-1�
iso-8859-1�iso-latin-1�latin-1-�iso-8859-1-�iso-latin-1-)r�r�r�)r�r�r�)�lower�replace�
startswith)�orig_enc�encrrr�_get_normal_names
r�cs�d�d}d}�fdd�}�fdd�}|�}|jt�rHd�|dd�}d	}|sT|gfS||�}|rj||gfStj|�s~||gfS|�}|s�||gfS||�}|r�|||gfS|||gfS)
NFzutf-8cs"y��Stk
rt�SXdS)N)�
StopIteration�bytesr)r�rr�read_or_stop sz%detect_encoding.<locals>.read_or_stopcs�y|jd�}Wntk
r"dSXtj|�}|s6dSt|jd��}yt|�}Wn tk
rptd|��YnX�r�|j	dkr�td��|d7}|S)N�asciirzunknown encoding: zutf-8zencoding problem: utf-8z-sig)
�decode�UnicodeDecodeError�	cookie_re�matchr�rr�LookupError�SyntaxError�name)r��line_stringr��encoding�codec)�	bom_foundrr�find_cookie&s"

z$detect_encoding.<locals>.find_cookieT�z	utf-8-sig)r�r�blank_rer�)r�r��defaultr�r��first�secondr)r�r�r�detect_encodings0




r�cCst�}|j|�S)N)r�r)r��utrrrrTsc!cs�d}}}tjdd}}d\}}d}dg}	d}
d}d}d}
�x�y
|�}Wntk
rdd}YnX|d}dt|�}}|�rF|s�td|��|j|�}|r�|jd�}}t||d|�|||f||fVd \}}d}nd|�r0|d!d�d	k�r0|d"d�dk�r0t||||t|�f|fVd}d}qBn||}||}qB�nF|dk�rt|�rt|�s`Pd}xf||k�r�||dk�r�|d}n6||d
k�r�|t	dt	}n||dk�r�d}nP|d}�qfW||k�r�P|
�r�|
Vd}
||dk�r�||dk�rh||d�j
d�}|t|�}t|||f||t|�f|fVt||d�||f|t|�f|fVqBttf||dk||d�||f|t|�f|fVqB||	d#k�r�|	j
|�t|d|�|df||f|fVxt||	d$k�rJ||	k�rtdd|||f��|	dd%�}	|�r.||	d&k�r.d}d}
d}td||f||f|fV�q�W|�r�|
�r�||	d'k�r�d}d}
d}n|�s�td|df��d}�x�||k�r8tj||�}|�r|jd�\}}||f||f|}}}|||�||}}||k�s|dk�r|dk�rt||||fV�q4|dk�rft}|dk�r8t}n
|�rBd}
|
�rR|
Vd}
|||||fV�q4|dk�r�|
�r�|
Vd}
t||||fV�q4|tk�rt|}|j||�}|�r�|jd�}|||�}|
�r�|
Vd}
t||||f|fVn||f}||d�}|}P�q4|tk�s@|dd�tk�s@|dd
�tk�r�|d(dk�r�||f}t|�pxt|d�pxt|d}||d�d}}|}Pn |
�r�|
Vd}
t||||fV�q4||k�r�|d)k�r�|�r�|dk�r�tnt||||fV�q�t||||f}|dk�r|
�r|}
�q�|dk�rx|
�rx|
dtk�rx|
ddk�rxd}|	d*}t|
d|
d|
d
|
dfVd}
|
�r�|
Vd}
|Vnz|dk�r�|
�r�|
Vd}
t||||f|fVd}nF|dk�r�|d}n|dk�r�|d}|
�r�|
Vd}
t||||fVn(t||||f||df|fV|d}�q�WqBW|
�rN|
Vd}
x.|	dd�D]} td|df|dfdfV�q\Wtd|df|dfdfVdS)+Nrr�
0123456789r�FrzEOF in multi-line stringr�z\
r�z\
r��	�z#
�#z
z3unindent does not match any outer indentation levelz
<tokenize>zEOF in multi-line statement�.T�
�async�await�def��\z([{z)]})r�r)r�r������r�r�r�r�r�r�)r�r�r�)�stringZ
ascii_lettersr�r�rwr�r��STRING�
ERRORTOKEN�tabsize�rstrip�COMMENTr�r�r��IndentationErrorr��
pseudoprog�spanr�r��
triple_quoted�endprogs�
single_quotedr�r�r��OP�	ENDMARKER)!r��lnum�parenlev�	continuedZ	namechars�numchars�contstr�needcont�contliner��stashed�	async_def�async_def_indent�async_def_nlr��pos�max�strstart�endprog�endmatchr��column�
comment_token�nl_pos�pseudomatchr��spos�eposr�initial�newliner��indentrrrrisp



*


 














�__main__)*rrr&r'r(r)r*r+r,r-r.r/r0r1r2r3r4r5r6r7r8r9r:r;r<r=r>r?r@rArBrCrDrErFrGrHrIrJrKrLrM)*rrrNrOrPrQrRrSrTrUrVrWrXrYrZr[r\r]r^r_r`rarbrcrdrerfrgrhrirjrkrlrmrnrorprqrrrsrtru)R�
__author__�__credits__r��re�codecsrrZlib2to3.pgen2.tokenr�r�dir�__all__r��	NameError�strrrr�
Whitespace�Comment�Ignore�Name�	Binnumber�	Hexnumber�	Octnumber�	Decnumber�	Intnumber�Exponent�
Pointfloat�Expfloat�Floatnumber�
Imagnumber�Number�Single�Double�Single3�Double3Z
_litprefix�Triple�String�Operator�Bracket�Special�Funny�
PlainToken�Token�ContStr�PseudoExtras�PseudoToken�list�map�compileZ	tokenprogr�Zsingle3progZdouble3progr�r�r�r�r��	Exceptionrwr{r�r
r�r��ASCIIr�r�r�r�rrrx�sysr��argv�openr��stdinrrrr�<module>s�







8Ic
__pycache__/driver.cpython-36.pyc000064400000012010150467362370012704 0ustar003


 \��@s�dZdZddgZddlZddlZddlZddlZddlZddlZddl	m
Z
mZmZm
Z
mZGdd�de�Zd	d
�Zddd�Zdd�Zdd�Zdd�Zedkr�ejee���dS)zZParser driver.

This provides a high-level interface to parse a file into a syntax tree.

z#Guido van Rossum <guido@python.org>�Driver�load_grammar�N�)�grammar�parse�token�tokenize�pgenc@sHeZdZddd�Zddd�Zddd�Zdd	d
�Zddd�Zdd
d�ZdS)rNcCs&||_|dkrtj�}||_||_dS)N)r�logging�	getLogger�logger�convert)�selfrr
r�r�,/usr/lib64/python3.6/lib2to3/pgen2/driver.py�__init__ s
zDriver.__init__FcCs�tj|j|j�}|j�d}d}d}}}}	}
d}�xR|D�]4}|\}}}}	}
|||fkr�||f|ks|t||f|f��|\}
}||
kr�|d|
|7}|
}d}||kr�||
||�7}|}|tjtjfkr�||7}|	\}}|j	d�r@|d7}d}q@|t
jk�rtj|}|�r,|j
jdt
j|||�|j||||f��rT|�rR|j
jd�Pd}|	\}}|j	d�r@|d7}d}q@Wtjd||||f��|jS)	z4Parse a series of tokens and return the syntax tree.rrN��
z%s %r (prefix=%r)zStop.zincomplete input)rZParserrr
Zsetup�AssertionErrorr�COMMENT�NL�endswithr�OPZopmapr�debug�tok_nameZaddtokenZ
ParseErrorZrootnode)r�tokensr�p�lineno�column�type�value�start�endZ	line_text�prefixZ	quintupleZs_linenoZs_columnrrr�parse_tokens'sT


zDriver.parse_tokenscCstj|j�}|j||�S)z*Parse a stream and return the syntax tree.)r�generate_tokens�readliner$)r�streamrrrrr�parse_stream_rawWszDriver.parse_stream_rawcCs|j||�S)z*Parse a stream and return the syntax tree.)r()rr'rrrr�parse_stream\szDriver.parse_streamc
Cs*tj|d|�}z|j||�S|j�XdS)z(Parse a file and return the syntax tree.�rN)�codecs�openr)�close)r�filename�encodingrr'rrr�
parse_file`szDriver.parse_filecCstjtj|�j�}|j||�S)z*Parse a string and return the syntax tree.)rr%�io�StringIOr&r$)r�textrrrrr�parse_stringhszDriver.parse_string)NN)F)F)F)NF)F)	�__name__�
__module__�__qualname__rr$r(r)r0r4rrrrrs

0


cCs:tjj|�\}}|dkrd}||djtttj��dS)Nz.txtr�.z.pickle)�os�path�splitext�join�map�str�sys�version_info)�gt�head�tailrrr�_generate_pickle_namensrD�Grammar.txtTFcCs�|dkrtj�}|dkr t|�n|}|s4t||�r�|jd|�tj|�}|r�|jd|�y|j|�Wq�tk
r�}z|jd|�WYdd}~Xq�Xnt	j
�}|j|�|S)z'Load the grammar (maybe from a pickle).Nz!Generating grammar tables from %szWriting grammar tables to %szWriting failed: %s)r
rrD�_newer�infor	Zgenerate_grammar�dump�OSErrorr�Grammar�load)rAZgp�save�forcer�g�errrrus
 
cCs8tjj|�sdStjj|�s dStjj|�tjj|�kS)z0Inquire whether file a was written since file b.FT)r9r:�exists�getmtime)�a�brrrrF�s
rFcCsFtjj|�rt|�Sttjj|��}tj||�}tj	�}|j
|�|S)a�Normally, loads a pickled grammar by doing
        pkgutil.get_data(package, pickled_grammar)
    where *pickled_grammar* is computed from *grammar_source* by adding the
    Python version and using a ``.pickle`` extension.

    However, if *grammar_source* is an extant file, load_grammar(grammar_source)
    is called instead. This facilitates using a packaged grammar file when needed
    but preserves load_grammar's automatic regeneration behavior when possible.

    )r9r:�isfilerrD�basename�pkgutil�get_datarrJ�loads)�packageZgrammar_sourceZpickled_name�datarNrrr�load_packaged_grammar�s
r[cGsF|stjdd�}tjtjtjdd�x|D]}t|ddd�q,WdS)z�Main program, when run as a script: produce grammar pickle files.

    Calls load_grammar for each argument, a path to a grammar text file.
    rNz%(message)s)�levelr'�formatT)rLrM)r?�argvr
ZbasicConfig�INFO�stdoutr)�argsrArrr�main�s
rb�__main__)rENTFN)�__doc__�
__author__�__all__r+r1r9r
rVr?rrrrrr	�objectrrDrrFr[rbr5�exit�intrrrr�<module>s$P
	
__pycache__/driver.cpython-36.opt-2.pyc000064400000007703150467362370013661 0ustar003


 \��@s�dZddgZddlZddlZddlZddlZddlZddlZddlm	Z	m
Z
mZmZm
Z
Gdd�de�Zdd	�Zdd
d�Zdd�Zdd�Zdd�Zedkr�ejee���dS)z#Guido van Rossum <guido@python.org>�Driver�load_grammar�N�)�grammar�parse�token�tokenize�pgenc@sHeZdZddd�Zddd�Zddd�Zdd	d
�Zddd�Zdd
d�ZdS)rNcCs&||_|dkrtj�}||_||_dS)N)r�logging�	getLogger�logger�convert)�selfrr
r�r�,/usr/lib64/python3.6/lib2to3/pgen2/driver.py�__init__ s
zDriver.__init__FcCsvtj|j|j�}|j�d}d}d}}}}	}
d}�x4|D�]}|\}}}}	}
|||fkr�|\}
}||
kr�|d|
|7}|
}d}||kr�||
||�7}|}|tjtjfkr�||7}|	\}}|jd�r@|d7}d}q@|t	j
kr�tj|}|�r|jj
dt	j|||�|j||||f��r6|�r4|jj
d�Pd}|	\}}|jd�r@|d7}d}q@Wtjd||||f��|jS)Nrr��
z%s %r (prefix=%r)zStop.zincomplete input)rZParserrr
Zsetupr�COMMENT�NL�endswithr�OPZopmapr�debug�tok_nameZaddtokenZ
ParseErrorZrootnode)r�tokensr�p�lineno�column�type�value�start�endZ	line_text�prefixZ	quintupleZs_linenoZs_columnrrr�parse_tokens'sR



zDriver.parse_tokenscCstj|j�}|j||�S)N)r�generate_tokens�readliner#)r�streamrrrrr�parse_stream_rawWszDriver.parse_stream_rawcCs|j||�S)N)r')rr&rrrr�parse_stream\szDriver.parse_streamc
Cs*tj|d|�}z|j||�S|j�XdS)N�r)�codecs�openr(�close)r�filename�encodingrr&rrr�
parse_file`szDriver.parse_filecCstjtj|�j�}|j||�S)N)rr$�io�StringIOr%r#)r�textrrrrr�parse_stringhszDriver.parse_string)NN)F)F)F)NF)F)	�__name__�
__module__�__qualname__rr#r'r(r/r3rrrrrs

0


cCs:tjj|�\}}|dkrd}||djtttj��dS)Nz.txtr�.z.pickle)�os�path�splitext�join�map�str�sys�version_info)�gt�head�tailrrr�_generate_pickle_namensrC�Grammar.txtTFcCs�|dkrtj�}|dkr t|�n|}|s4t||�r�|jd|�tj|�}|r�|jd|�y|j|�Wq�tk
r�}z|jd|�WYdd}~Xq�Xnt	j
�}|j|�|S)Nz!Generating grammar tables from %szWriting grammar tables to %szWriting failed: %s)r
rrC�_newer�infor	Zgenerate_grammar�dump�OSErrorr�Grammar�load)r@Zgp�save�forcer�g�errrrus
 
cCs8tjj|�sdStjj|�s dStjj|�tjj|�kS)NFT)r8r9�exists�getmtime)�a�brrrrE�s
rEcCsFtjj|�rt|�Sttjj|��}tj||�}tj	�}|j
|�|S)N)r8r9�isfilerrC�basename�pkgutil�get_datarrI�loads)�packageZgrammar_sourceZpickled_name�datarMrrr�load_packaged_grammar�s
rZcGsF|stjdd�}tjtjtjdd�x|D]}t|ddd�q,WdS)Nrz%(message)s)�levelr&�formatT)rKrL)r>�argvr
ZbasicConfig�INFO�stdoutr)�argsr@rrr�main�s
ra�__main__)rDNTFN)�
__author__�__all__r*r0r8r
rUr>rrrrrr	�objectrrCrrErZrar4�exit�intrrrr�<module>s"P
	
__pycache__/grammar.cpython-36.opt-1.pyc000064400000015621150467362370014011 0ustar003


 \��@sxdZddlZddlZddlmZmZGdd�de�Zdd�Zd	Z	iZ
x.e	j�D]"ZerNej
�\ZZeee�e
e<qNWdS)
a�This module defines the data structures used to represent a grammar.

These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.

There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.

�N�)�token�tokenizec@s@eZdZdZdd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Z	dS)�Grammara�	Pgen parsing tables conversion class.

    Once initialized, this class supplies the grammar tables for the
    parsing engine implemented by parse.py.  The parsing engine
    accesses the instance variables directly.  The class here does not
    provide initialization of the tables; several subclasses exist to
    do this (see the conv and pgen modules).

    The load() method reads the tables from a pickle file, which is
    much faster than the other ways offered by subclasses.  The pickle
    file is written by calling dump() (after loading the grammar
    tables using a subclass).  The report() method prints a readable
    representation of the tables to stdout, for debugging.

    The instance variables are as follows:

    symbol2number -- a dict mapping symbol names to numbers.  Symbol
                     numbers are always 256 or higher, to distinguish
                     them from token numbers, which are between 0 and
                     255 (inclusive).

    number2symbol -- a dict mapping numbers to symbol names;
                     these two are each other's inverse.

    states        -- a list of DFAs, where each DFA is a list of
                     states, each state is a list of arcs, and each
                     arc is a (i, j) pair where i is a label and j is
                     a state number.  The DFA number is the index into
                     this list.  (This name is slightly confusing.)
                     Final states are represented by a special arc of
                     the form (0, j) where j is its own state number.

    dfas          -- a dict mapping symbol numbers to (DFA, first)
                     pairs, where DFA is an item from the states list
                     above, and first is a set of tokens that can
                     begin this grammar rule (represented by a dict
                     whose values are always 1).

    labels        -- a list of (x, y) pairs where x is either a token
                     number or a symbol number, and y is either None
                     or a string; the strings are keywords.  The label
                     number is the index in this list; label numbers
                     are used to mark state transitions (arcs) in the
                     DFAs.

    start         -- the number of the grammar's start symbol.

    keywords      -- a dict mapping keyword strings to arc labels.

    tokens        -- a dict mapping token numbers to arc labels.
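    These invariants are easy to spot-check on a loaded grammar, e.g. with
    the bundled lib2to3 grammar (a sketch, assuming it is on disk):

        import os
        import lib2to3
        from lib2to3.pgen2 import driver

        g = driver.load_grammar(
            os.path.join(os.path.dirname(lib2to3.__file__), "Grammar.txt"))
        assert all(n >= 256 for n in g.symbol2number.values())
        print(g.number2symbol[g.start])   # name of the start symbol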

    cCs<i|_i|_g|_i|_dg|_i|_i|_i|_d|_dS)Nr�EMPTY�)rr)	�
symbol2number�
number2symbol�states�dfas�labels�keywords�tokens�symbol2label�start)�self�r�-/usr/lib64/python3.6/lib2to3/pgen2/grammar.py�__init__MszGrammar.__init__cCs2t|d��}t|j�}tj||d�WdQRXdS)a�Dump the grammar tables to a pickle file.

        dump() recursively changes all dict to OrderedDict, so the pickled file
        is not exactly the same as what was passed in to dump(). load() uses the
        pickled file to create the tables, but only changes OrderedDict to dict
        at the top level; it does not recursively change OrderedDict to dict.
        So, the loaded tables are different from the original tables that were
        passed to load() in that some of the OrderedDict (from the pickled file)
        are not changed back to dict. For parsing, this has no effect on
        performance because OrderedDict uses dict's __getitem__ with nothing in
        between.
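        A round-trip sketch of dump() and load() (the temporary path is
        arbitrary):

            import os, tempfile
            from lib2to3.pgen2 import grammar

            g = grammar.Grammar()
            path = os.path.join(tempfile.mkdtemp(), "Grammar.pickle")
            g.dump(path)
            g2 = grammar.Grammar()
            g2.load(path)
            assert g2.symbol2number == g.symbol2number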
        �wb�N)�open�_make_deterministic�__dict__�pickle�dump)r�filename�f�drrrrXs

zGrammar.dumpc	Cs0t|d��}tj|�}WdQRX|jj|�dS)z+Load the grammar tables from a pickle file.�rbN)rr�loadr�update)rrrrrrrr iszGrammar.loadcCs|jjtj|��dS)z3Load the grammar tables from a pickle bytes object.N)rr!r�loads)rZpklrrrr"osz
Grammar.loadscCsX|j�}x"dD]}t||t||�j��qW|jdd�|_|jdd�|_|j|_|S)	z#
        Copy the grammar.
        rr	rr
rrN)rr	rr
rr)�	__class__�setattr�getattr�copyrr
r)r�newZ	dict_attrrrrr&sszGrammar.copycCsvddlm}td�||j�td�||j�td�||j�td�||j�td�||j�td|j�d	S)
z:Dump the grammar tables to standard output, for debugging.r)�pprintZs2nZn2sr
rrrN)r(�printrr	r
rrr)rr(rrr�report�s




zGrammar.reportN)
�__name__�
__module__�__qualname__�__doc__rrr r"r&r*rrrrrs4
rcCs^t|t�r&tjtdd�|j�D���St|t�r>dd�|D�St|t�rZtdd�|D��S|S)Ncss|]\}}|t|�fVqdS)N)r)�.0�k�vrrr�	<genexpr>�sz&_make_deterministic.<locals>.<genexpr>cSsg|]}t|��qSr)r)r/�errr�
<listcomp>�sz'_make_deterministic.<locals>.<listcomp>css|]}t|�VqdS)N)r)r/r3rrrr2�s)�
isinstance�dict�collections�OrderedDict�sorted�items�list�tuple)�toprrrr�s


ra
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
@= ATEQUAL
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
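The table above becomes the module's opmap dict; a quick sanity check:

    from lib2to3.pgen2 import grammar, token

    assert grammar.opmap["->"] == token.RARROW
    assert grammar.opmap["**="] == token.DOUBLESTAREQUAL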
)r.r7r�rr�objectrrZ	opmap_rawZopmap�
splitlines�line�split�op�namer%rrrr�<module>
sy=__pycache__/pgen.cpython-36.opt-2.pyc000064400000022165150467362370013316 0ustar003


 \�5�@sdddlmZmZmZGdd�dej�ZGdd�de�ZGdd�de�ZGdd	�d	e�Z	ddd�Z
d
S)�)�grammar�token�tokenizec@seZdZdS)�PgenGrammarN)�__name__�
__module__�__qualname__�r	r	�*/usr/lib64/python3.6/lib2to3/pgen2/pgen.pyrsrc@s�eZdZd&dd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Zdd�Z	dd�Z
dd�Zdd�Zdd�Z
dd�Zdd�Zdd�Zdd�Zd'd d!�Zd"d#�Zd$d%�ZdS)(�ParserGeneratorNcCsld}|dkrt|�}|j}||_||_tj|j�|_|j�|j	�\|_
|_|dk	rZ|�i|_|j
�dS)N)�open�close�filename�streamr�generate_tokens�readline�	generator�gettoken�parse�dfas�startsymbol�first�addfirstsets)�selfrrZclose_streamr	r	r
�__init__szParserGenerator.__init__cCs*t�}t|jj��}|j�|j|j�|jd|j�x.|D]&}dt|j	�}||j	|<||j
|<q<Wx�|D]�}|j|}g}xl|D]d}g}x6t|jj
��D]$\}	}
|j|j||	�|j|
�f�q�W|jr�|jd|j|�f�|j|�q�W|jj|�||j||�f|j|j	|<qlW|j	|j|_|S)N��)r�listr�keys�sort�remover�insert�len�
symbol2numberZ
number2symbol�sorted�arcs�items�append�
make_label�index�isfinal�states�
make_first�start)r�c�names�name�i�dfar+�stater%�label�nextr	r	r
�make_grammars.




  zParserGenerator.make_grammarcCs8|j|}i}x$t|�D]}|j||�}d||<qW|S)Nr)rr$r()rr.r0Zrawfirstrr4�ilabelr	r	r
r,4s
zParserGenerator.make_firstcCs&t|j�}|dj�r�||jkrZ||jkr4|j|S|jj|j|df�||j|<|Sn>tt|d�}||jkrz|j|S|jj|df�||j|<|Sn�t	|�}|dj�r�||j
kr�|j
|S|jjtj|f�||j
|<|Sn>tj
|}||jk�r|j|S|jj|df�||j|<|SdS)Nr)r"Zlabels�isalphar#Zsymbol2labelr'�getattrr�tokens�eval�keywords�NAMErZopmap)rr.r4r7Zitoken�valuer	r	r
r(=s6













zParserGenerator.make_labelcCs<t|jj��}|j�x |D]}||jkr|j|�qWdS)N)rrrrr�	calcfirst)rr/r0r	r	r
rks


zParserGenerator.addfirstsetsc	Cs
|j|}d|j|<|d}i}i}x�|jj�D]x\}}||jkr�||jkrl|j|}|dkr�td|��n|j|�|j|}|j|�|||<q0d||<|di||<q0Wi}	xJ|j�D]>\}}
x4|
D],}||	kr�td||||	|f��||	|<q�Wq�W||j|<dS)Nrzrecursion for rule %rrzArule %s is ambiguous; %s is in the first sets of %s as well as %s)rrr%r&�
ValueErrorr?�update)rr0r2r3ZtotalsetZoverlapcheckr4r5�fsetZinverseZitsfirstZsymbolr	r	r
r?ss2









zParserGenerator.calcfirstc	Cs�i}d}x�|jtjkr�x|jtjkr.|j�qW|jtj�}|jtjd�|j�\}}|jtj�|j	||�}t
|�}|j|�t
|�}|||<|dkr
|}q
W||fS)N�:)�typer�	ENDMARKER�NEWLINEr�expectr=�OP�	parse_rhs�make_dfar"�simplify_dfa)	rrrr0�a�zr2ZoldlenZnewlenr	r	r
r�s"
zParserGenerator.parsecs��fdd�}�fdd��t||�|�g}x�|D]�}i}x<|jD]2}x,|jD]"\}}	|dk	rJ�|	|j|i��qJWq>WxRt|j��D]B\}}
x,|D]}|j|
kr�Pq�Wt|
|�}|j|�|j||�q�Wq.W|S)Ncsi}�||�|S)Nr	)r3�base)�
addclosurer	r
�closure�s
z)ParserGenerator.make_dfa.<locals>.closurecs>||krdSd||<x$|jD]\}}|dkr�||�qWdS)Nr)r%)r3rNr4r5)rOr	r
rO�sz,ParserGenerator.make_dfa.<locals>.addclosure)�DFAState�nfasetr%�
setdefaultr$r&r'�addarc)rr-�finishrPr+r3r%Znfastater4r5rR�str	)rOr
rJ�s"




zParserGenerator.make_dfac
Cs�td|�|g}x�t|�D]�\}}td|||kr4dp6d�x^|jD]T\}}||kr^|j|�}	nt|�}	|j|�|dkr�td|	�qBtd||	f�qBWqWdS)NzDump of NFA forz  Statez(final)�z	    -> %dz    %s -> %d)�print�	enumerater%r)r"r')
rr0r-rUZtodor1r3r4r5�jr	r	r
�dump_nfa�s

zParserGenerator.dump_nfacCsltd|�x\t|�D]P\}}td||jr,dp.d�x0t|jj��D]\}}td||j|�f�qBWqWdS)NzDump of DFA forz  Statez(final)rWz    %s -> %d)rXrYr*r$r%r&r))rr0r2r1r3r4r5r	r	r
�dump_dfa�s

zParserGenerator.dump_dfacCs~d}xt|rxd}xft|�D]Z\}}xPt|dt|��D]:}||}||kr4||=x|D]}|j||�qTWd}Pq4WqWqWdS)NTFr)rY�ranger"�
unifystate)rr2Zchangesr1Zstate_irZZstate_jr3r	r	r
rK�s
zParserGenerator.simplify_dfacCs�|j�\}}|jdkr||fSt�}t�}|j|�|j|�x6|jdkrt|j�|j�\}}|j|�|j|�q@W||fSdS)N�|)�	parse_altr>�NFAStaterTr)rrLrMZaaZzzr	r	r
rI�s



zParserGenerator.parse_rhscCsP|j�\}}x:|jdks*|jtjtjfkrF|j�\}}|j|�|}qW||fS)N�(�[)rbrc)�
parse_itemr>rDrr=�STRINGrT)rrL�br.�dr	r	r
r`
s
zParserGenerator.parse_altcCs�|jdkr>|j�|j�\}}|jtjd�|j|�||fS|j�\}}|j}|dkr`||fS|j�|j|�|dkr�||fS||fSdS)Nrc�]�+�*)rirj)r>rrIrGrrHrT�
parse_atom)rrLrMr>r	r	r
rds


zParserGenerator.parse_itemcCs�|jdkr4|j�|j�\}}|jtjd�||fS|jtjtjfkrpt	�}t	�}|j
||j�|j�||fS|jd|j|j�dS)Nrb�)z+expected (...) or NAME or STRING, got %s/%s)r>rrIrGrrHrDr=rerarT�raise_error)rrLrMr	r	r
rk(s
zParserGenerator.parse_atomcCsD|j|ks|dk	r2|j|kr2|jd|||j|j�|j}|j�|S)Nzexpected %s/%s, got %s/%s)rDr>rmr)rrDr>r	r	r
rG9szParserGenerator.expectcCsJt|j�}x"|dtjtjfkr,t|j�}qW|\|_|_|_|_|_	dS)Nr)
r5rr�COMMENT�NLrDr>Zbegin�end�line)r�tupr	r	r
rAs
zParserGenerator.gettokencGs^|r8y||}Wn&dj|gttt|���}YnXt||j|jd|jd|jf��dS)N� rr)�joinr�map�str�SyntaxErrorrrprq)r�msg�argsr	r	r
rmHs zParserGenerator.raise_error)N)N)rrrrr6r,r(rr?rrJr[r\rKrIr`rdrkrGrrmr	r	r	r
r
s$
	.$

rc@seZdZdd�Zddd�ZdS)racCs
g|_dS)N)r%)rr	r	r
rSszNFAState.__init__NcCs|jj||f�dS)N)r%r')rr5r4r	r	r
rTVszNFAState.addarc)N)rrrrrTr	r	r	r
raQsrac@s0eZdZdd�Zdd�Zdd�Zdd�Zd	Zd	S)
rQcCs||_||k|_i|_dS)N)rRr*r%)rrR�finalr	r	r
r]s
zDFAState.__init__cCs||j|<dS)N)r%)rr5r4r	r	r
rTeszDFAState.addarccCs.x(|jj�D]\}}||kr||j|<qWdS)N)r%r&)r�old�newr4r5r	r	r
r^kszDFAState.unifystatecCsX|j|jkrdSt|j�t|j�kr(dSx*|jj�D]\}}||jj|�k	r4dSq4WdS)NFT)r*r"r%r&�get)r�otherr4r5r	r	r
�__eq__pszDFAState.__eq__N)rrrrrTr^r�__hash__r	r	r	r
rQ[s
rQ�Grammar.txtcCst|�}|j�S)N)rr6)r�pr	r	r
�generate_grammar�sr�N)r�)rWrrrZGrammarr�objectrrarQr�r	r	r	r
�<module>sI
%__pycache__/token.cpython-36.pyc000064400000003474150467362370012547 0ustar003


 \�@sPdZdZdZdZdZdZdZdZdZd	Z	d
Z
dZdZd
Z
dZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZd Z d!Z!d"Z"d#Z#d$Z$d%Z%d&Z&d'Z'd(Z(d)Z)d*Z*d+Z+d,Z,d-Z-d.Z.d/Z/d0Z0d1Z1d2Z2d3Z3d4Z4d5Z5d6Z6d7Z7d8Z8d9Z9d:Z:d;Z;d<Z<d=Z=iZ>x6e?e@�jA��D]$\ZBZCeDeC�eDd�k�reBe>eC<�qWd>d?�ZEd@dA�ZFdBdC�ZGdDS)Ez!Token constants (from "token.h").����������	�
���
������������������� �!�"�#�$�%�&�'�(�)�*�+�,�-�.�/�0�1�2�3�4�5�6�7�8�9�:�;�cCs|tkS)N)�	NT_OFFSET)�x�r@�+/usr/lib64/python3.6/lib2to3/pgen2/token.py�
ISTERMINALNsrBcCs|tkS)N)r>)r?r@r@rA�
ISNONTERMINALQsrCcCs|tkS)N)�	ENDMARKER)r?r@r@rA�ISEOFTsrEN)H�__doc__rD�NAME�NUMBER�STRING�NEWLINE�INDENT�DEDENT�LPAR�RPAR�LSQB�RSQB�COLON�COMMA�SEMI�PLUS�MINUS�STAR�SLASH�VBAR�AMPER�LESS�GREATER�EQUAL�DOT�PERCENTZ	BACKQUOTE�LBRACE�RBRACE�EQEQUAL�NOTEQUAL�	LESSEQUAL�GREATEREQUAL�TILDE�
CIRCUMFLEX�	LEFTSHIFT�
RIGHTSHIFT�
DOUBLESTAR�	PLUSEQUAL�MINEQUAL�	STAREQUAL�
SLASHEQUAL�PERCENTEQUAL�
AMPEREQUAL�	VBAREQUAL�CIRCUMFLEXEQUAL�LEFTSHIFTEQUAL�RIGHTSHIFTEQUAL�DOUBLESTAREQUAL�DOUBLESLASH�DOUBLESLASHEQUAL�AT�ATEQUAL�OP�COMMENT�NL�RARROW�AWAIT�ASYNC�
ERRORTOKEN�N_TOKENSr>�tok_name�list�globals�items�_nameZ_value�typerBrCrEr@r@r@rA�<module>s�__pycache__/parse.cpython-36.opt-1.pyc000064400000014233150467362370013473 0ustar003


 \u�@s4dZddlmZGdd�de�ZGdd�de�ZdS)z�Parser engine for the grammar tables generated by pgen.

The grammar table must be loaded first.

See Parser/parser.c in the Python distribution for additional info on
how this parsing engine works.

�)�tokenc@seZdZdZdd�ZdS)�
ParseErrorz(Exception to signal the parser is stuck.cCs4tj|d||||f�||_||_||_||_dS)Nz!%s: type=%r, value=%r, context=%r)�	Exception�__init__�msg�type�value�context)�selfrrrr	�r�+/usr/lib64/python3.6/lib2to3/pgen2/parse.pyrszParseError.__init__N)�__name__�
__module__�__qualname__�__doc__rrrrrrsrc@sLeZdZdZddd�Zddd�Zdd�Zd	d
�Zdd�Zd
d�Z	dd�Z
dS)�Parsera5Parser engine.

    The proper usage sequence is:

    p = Parser(grammar, [converter])  # create instance
    p.setup([start])                  # prepare for parsing
    <for each input token>:
        if p.addtoken(...):           # parse a token; may raise ParseError
            break
    root = p.rootnode                 # root of abstract syntax tree

    A Parser instance may be reused by calling setup() repeatedly.

    A Parser instance contains state pertaining to the current token
    sequence, and should not be used concurrently by different threads
    to parse separate token sequences.

    See driver.py for how to get input tokens by tokenizing a file or
    string.

    Parsing is complete when addtoken() returns True; the root of the
    abstract syntax tree can then be retrieved from the rootnode
    instance variable.  When a syntax error occurs, addtoken() raises
    the ParseError exception.  There is no error recovery; the parser
    cannot be used after a syntax error was reported (but it can be
    reinitialized by calling setup()).

    NcCs||_|pdd�|_dS)a�Constructor.

        The grammar argument is a grammar.Grammar instance; see the
        grammar module for more information.

        The parser is not ready yet for parsing; you must call the
        setup() method to get it started.

        The optional convert argument is a function mapping concrete
        syntax tree nodes to abstract syntax tree nodes.  If not
        given, no conversion is done and the syntax tree produced is
        the concrete syntax tree.  If given, it must be a function of
        two arguments, the first being the grammar (a grammar.Grammar
        instance), and the second being the concrete syntax tree node
        to be converted.  The syntax tree is converted from the bottom
        up.

        A concrete syntax tree node is a (type, value, context, nodes)
        tuple, where type is the node type (a token or symbol number),
        value is None for symbols and a string for tokens, context is
        None or an opaque value used for error reporting (typically a
        (lineno, offset) pair), and nodes is a list of children for
        symbols, and None for tokens.

        An abstract syntax tree node may be anything; this is entirely
        up to the converter function.

        cSs|S)Nr)�grammar�noderrr�<lambda>Wsz!Parser.__init__.<locals>.<lambda>N)r�convert)r
rrrrrr9szParser.__init__cCsH|dkr|jj}|ddgf}|jj|d|f}|g|_d|_t�|_dS)a�Prepare for parsing.

        This *must* be called before starting to parse.

        The optional argument is an alternative start symbol; it
        defaults to the grammar's start symbol.

        You can use a Parser instance to parse any number of programs;
        each time you call setup() the parser is reset to an initial
        state determined by the (implicit or explicit) start symbol.

        N�)r�start�dfas�stack�rootnode�set�
used_names)r
r�newnodeZ
stackentryrrr�setupYs
zParser.setupcCs:|j|||�}�x$|jd	\}}}|\}}	||}
�x�|
D]�\}}|jj|\}
}||kr�|j||||�|}x@||d|fgkr�|j�|js�dS|jd
\}}}|\}}	qpWdS|
dkr:|jj|
}|\}}||kr:|j|
|jj|
||�Pq:Wd|f|
k�r$|j�|j�s2td|||��qtd|||��qWdS)z<Add a token; return True iff this is the end of the program.rrTF�ztoo much inputz	bad inputN���r )	�classifyrrZlabels�shift�popr�pushr)r
rrr	�ilabel�dfa�staterZstates�firstZarcs�i�newstate�t�vZitsdfaZ	itsstatesZitsfirstrrr�addtokenqs:zParser.addtokencCsX|tjkr0|jj|�|jjj|�}|dk	r0|S|jjj|�}|dkrTtd|||��|S)z&Turn a token into a label.  (Internal)Nz	bad token)	r�NAMEr�addr�keywords�get�tokensr)r
rrr	r%rrrr!�s
zParser.classifyc	CsT|jd\}}}|||df}|j|j|�}|dk	r@|dj|�|||f|jd<dS)zShift a token.  (Internal)rNr r r )rrr�append)	r
rrr*r	r&r'rrrrrr"�szParser.shiftc	CsB|jd\}}}|d|gf}|||f|jd<|jj|d|f�dS)zPush a nonterminal.  (Internal)rNrr r )rr3)	r
rZnewdfar*r	r&r'rrrrrr$�szParser.pushcCs`|jj�\}}}|j|j|�}|dk	r\|jrL|jd\}}}|dj|�n||_|j|j_dS)zPop a nonterminal.  (Internal)Nrr r )rr#rrr3rr)r
ZpopdfaZpopstateZpopnoderr&r'rrrrr#�sz
Parser.pop)N)N)r
rrrrrr-r!r"r$r#rrrrrs
 
0	rN)r�rrr�objectrrrrr�<module>s__pycache__/parse.cpython-36.opt-2.pyc000064400000006006150467362370013473 0ustar003


 \u�@s0ddlmZGdd�de�ZGdd�de�ZdS)�)�tokenc@seZdZdd�ZdS)�
ParseErrorcCs4tj|d||||f�||_||_||_||_dS)Nz!%s: type=%r, value=%r, context=%r)�	Exception�__init__�msg�type�value�context)�selfrrrr	�r�+/usr/lib64/python3.6/lib2to3/pgen2/parse.pyrszParseError.__init__N)�__name__�
__module__�__qualname__rrrrrrsrc@sHeZdZddd�Zddd�Zdd�Zdd	�Zd
d�Zdd
�Zdd�Z	dS)�ParserNcCs||_|pdd�|_dS)NcSs|S)Nr)�grammar�noderrr�<lambda>Wsz!Parser.__init__.<locals>.<lambda>)r�convert)r
rrrrrr9szParser.__init__cCsH|dkr|jj}|ddgf}|jj|d|f}|g|_d|_t�|_dS)N�)r�start�dfas�stack�rootnode�set�
used_names)r
r�newnodeZ
stackentryrrr�setupYs
zParser.setupcCs:|j|||�}�x$|jd\}}}|\}}	||}
�x�|
D]�\}}|jj|\}
}||kr�|j||||�|}x@||d|fgkr�|j�|js�dS|jd	\}}}|\}}	qpWdS|
dkr:|jj|
}|\}}||kr:|j|
|jj|
||�Pq:Wd|f|
k�r$|j�|j�s2td|||��qtd|||��qWdS)
NrrTF�ztoo much inputz	bad input���r)	�classifyrrZlabels�shift�popr�pushr)r
rrr	�ilabel�dfa�staterZstates�firstZarcs�i�newstate�t�vZitsdfaZ	itsstatesZitsfirstrrr�addtokenqs:zParser.addtokencCsX|tjkr0|jj|�|jjj|�}|dk	r0|S|jjj|�}|dkrTtd|||��|S)Nz	bad token)	r�NAMEr�addr�keywords�get�tokensr)r
rrr	r$rrrr �s
zParser.classifyc	CsT|jd\}}}|||df}|j|j|�}|dk	r@|dj|�|||f|jd<dS)Nrrrr)rrr�append)	r
rrr)r	r%r&rrrrrr!�szParser.shiftc	CsB|jd\}}}|d|gf}|||f|jd<|jj|d|f�dS)Nrrrr)rr2)	r
rZnewdfar)r	r%r&rrrrrr#�szParser.pushcCs`|jj�\}}}|j|j|�}|dk	r\|jrL|jd\}}}|dj|�n||_|j|j_dS)Nrrr)rr"rrr2rr)r
ZpopdfaZpopstateZpopnoderr%r&rrrrr"�sz
Parser.pop)N)N)
r
rrrrr,r r!r#r"rrrrrs
 
0	rN)�rrr�objectrrrrr�<module>s__pycache__/token.cpython-36.opt-2.pyc000064400000003412150467362370013477 0ustar003


 \�@sLdZdZdZdZdZdZdZdZdZd	Z	d
Z
dZdZd
Z
dZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZd Z d!Z!d"Z"d#Z#d$Z$d%Z%d&Z&d'Z'd(Z(d)Z)d*Z*d+Z+d,Z,d-Z-d.Z.d/Z/d0Z0d1Z1d2Z2d3Z3d4Z4d5Z5d6Z6d7Z7d8Z8d9Z9d:Z:d;Z;d<Z<iZ=x6e>e?�j@��D]$\ZAZBeCeB�eCd�k�reAe=eB<�qWd=d>�ZDd?d@�ZEdAdB�ZFdCS)D����������	�
���
������������������� �!�"�#�$�%�&�'�(�)�*�+�,�-�.�/�0�1�2�3�4�5�6�7�8�9�:�;�cCs|tkS)N)�	NT_OFFSET)�x�r@�+/usr/lib64/python3.6/lib2to3/pgen2/token.py�
ISTERMINALNsrBcCs|tkS)N)r>)r?r@r@rA�
ISNONTERMINALQsrCcCs|tkS)N)�	ENDMARKER)r?r@r@rA�ISEOFTsrEN)GrD�NAME�NUMBER�STRING�NEWLINE�INDENT�DEDENT�LPAR�RPAR�LSQB�RSQB�COLON�COMMA�SEMI�PLUS�MINUS�STAR�SLASH�VBAR�AMPER�LESS�GREATER�EQUAL�DOT�PERCENTZ	BACKQUOTE�LBRACE�RBRACE�EQEQUAL�NOTEQUAL�	LESSEQUAL�GREATEREQUAL�TILDE�
CIRCUMFLEX�	LEFTSHIFT�
RIGHTSHIFT�
DOUBLESTAR�	PLUSEQUAL�MINEQUAL�	STAREQUAL�
SLASHEQUAL�PERCENTEQUAL�
AMPEREQUAL�	VBAREQUAL�CIRCUMFLEXEQUAL�LEFTSHIFTEQUAL�RIGHTSHIFTEQUAL�DOUBLESTAREQUAL�DOUBLESLASH�DOUBLESLASHEQUAL�AT�ATEQUAL�OP�COMMENT�NL�RARROW�AWAIT�ASYNC�
ERRORTOKEN�N_TOKENSr>�tok_name�list�globals�items�_nameZ_value�typerBrCrEr@r@r@rA�<module>	s�__pycache__/grammar.cpython-36.opt-2.pyc000064400000006253150467362370014013 0ustar003


 \��@stddlZddlZddlmZmZGdd�de�Zdd�ZdZiZ	x.ej
�D]"ZerJej�\Z
Zeee�e	e
<qJWdS)	�N�)�token�tokenizec@s<eZdZdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
S)�GrammarcCs<i|_i|_g|_i|_dg|_i|_i|_i|_d|_dS)Nr�EMPTY�)rr)	�
symbol2number�
number2symbol�states�dfas�labels�keywords�tokens�symbol2label�start)�self�r�-/usr/lib64/python3.6/lib2to3/pgen2/grammar.py�__init__MszGrammar.__init__cCs2t|d��}t|j�}tj||d�WdQRXdS)N�wb�)�open�_make_deterministic�__dict__�pickle�dump)r�filename�f�drrrrXs

zGrammar.dumpc	Cs0t|d��}tj|�}WdQRX|jj|�dS)N�rb)rr�loadr�update)rrrrrrrr iszGrammar.loadcCs|jjtj|��dS)N)rr!r�loads)rZpklrrrr"osz
Grammar.loadscCsX|j�}x"dD]}t||t||�j��qW|jdd�|_|jdd�|_|j|_|S)Nrr	rr
rr)rr	rr
rr)�	__class__�setattr�getattr�copyrr
r)r�newZ	dict_attrrrrr&sszGrammar.copycCsvddlm}td�||j�td�||j�td�||j�td�||j�td�||j�td|j�dS)	Nr)�pprintZs2nZn2sr
rrr)r(�printrr	r
rrr)rr(rrr�report�s




zGrammar.reportN)	�__name__�
__module__�__qualname__rrr r"r&r*rrrrrs6
rcCs^t|t�r&tjtdd�|j�D���St|t�r>dd�|D�St|t�rZtdd�|D��S|S)Ncss|]\}}|t|�fVqdS)N)r)�.0�k�vrrr�	<genexpr>�sz&_make_deterministic.<locals>.<genexpr>cSsg|]}t|��qSr)r)r.�errr�
<listcomp>�sz'_make_deterministic.<locals>.<listcomp>css|]}t|�VqdS)N)r)r.r2rrrr1�s)�
isinstance�dict�collections�OrderedDict�sorted�items�list�tuple)�toprrrr�s


ra
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
@= ATEQUAL
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
)r6r�rr�objectrrZ	opmap_rawZopmap�
splitlines�line�split�op�namer%rrrr�<module>sy=__pycache__/conv.cpython-36.opt-2.pyc000064400000007120150467362370013324 0ustar003


 \�%�@s.ddlZddlmZmZGdd�dej�ZdS)�N)�grammar�tokenc@s,eZdZdd�Zdd�Zdd�Zdd�Zd	S)
�	ConvertercCs |j|�|j|�|j�dS)N)�parse_graminit_h�parse_graminit_c�
finish_off)�selfZ
graminit_hZ
graminit_c�r	�*/usr/lib64/python3.6/lib2to3/pgen2/conv.py�run/s

z
Converter.runc	Cs�yt|�}Wn0tk
r<}ztd||f�dSd}~XnXi|_i|_d}xn|D]f}|d7}tjd|�}|r�|j�r�td|||j�f�qT|j�\}}t	|�}||j|<||j|<qTWdS)NzCan't open %s: %sFr�z^#define\s+(\w+)\s+(\d+)$z%s(%s): can't parse %sT)
�open�OSError�printZ
symbol2numberZ
number2symbol�re�match�strip�groups�int)	r�filename�f�err�lineno�line�mo�symbol�numberr	r	r
r5s&

zConverter.parse_graminit_hc!Cs�yt|�}Wn0tk
r<}ztd||f�dSd}~XnXd}|dt|�}}|dt|�}}|dt|�}}i}g}�x�|jd��rx�|jd��rLtjd|�}ttt	|j
���\}	}
}g}xRt|�D]F}
|dt|�}}tjd|�}ttt	|j
���\}}|j||f�q�W|dt|�}}|||	|
f<|dt|�}}q�Wtjd|�}ttt	|j
���\}}g}x^t|�D]R}
|dt|�}}tjd	|�}ttt	|j
���\}}	}
||	|
f}|j|��q~W|j|�|dt|�}}|dt|�}}q�W||_
i}tjd
|�}t	|jd��}x�t|�D]�}|dt|�}}tjd|�}|jd�}ttt	|jdd
dd���\}}}}||}|dt|�}}tjd|�}i}t|jd��}xPt|�D]D\}}t|�}x0td�D]$}|d|>@�r�d||d|<�q�W�q�W||f||<�q4W|dt|�}}||_g}|dt|�}}tjd|�}t	|jd��}xjt|�D]^}|dt|�}}tjd|�}|j
�\}}t	|�}|dk�r�d}nt|�}|j||f��qpW|dt|�}}||_|dt|�}}|dt|�}}tjd|�}t	|jd��}|dt|�}}|dt|�}}tjd|�}t	|jd��}|dt|�}}tjd|�}t	|jd��} | |_|dt|�}}y|dt|�}}Wntk
�r�YnXdS)NzCan't open %s: %sFrrzstatic arc z)static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$z\s+{(\d+), (\d+)},$z'static state states_(\d+)\[(\d+)\] = {$z\s+{(\d+), arcs_(\d+)_(\d+)},$zstatic dfa dfas\[(\d+)\] = {$z0\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$����z\s+("(?:\\\d\d\d)*")},$�z!static label labels\[(\d+)\] = {$z\s+{(\d+), (0|"\w+")},$�0z
\s+(\d+),$z\s+{(\d+), labels},$z	\s+(\d+)$)r
rr�next�
startswithrr�list�maprr�range�append�states�group�eval�	enumerate�ord�dfas�labels�start�
StopIteration)!rrrrrrZallarcsr)r�n�m�kZarcs�_�i�j�s�t�stater.Zndfasrr�x�y�z�firstZ	rawbitset�cZbyter/Znlabelsr0r	r	r
rTs�

"
zConverter.parse_graminit_ccCs\i|_i|_xJt|j�D]<\}\}}|tjkrB|dk	rB||j|<q|dkr||j|<qWdS)N)�keywords�tokensr,r/r�NAME)rZilabel�type�valuer	r	r
r�szConverter.finish_offN)�__name__�
__module__�__qualname__rrrrr	r	r	r
r$s
&r)rZpgen2rrZGrammarrr	r	r	r
�<module>s__pycache__/parse.cpython-36.pyc000064400000014271150467362370012536 0ustar003


 \u�@s4dZddlmZGdd�de�ZGdd�de�ZdS)z�Parser engine for the grammar tables generated by pgen.

The grammar table must be loaded first.

See Parser/parser.c in the Python distribution for additional info on
how this parsing engine works.

�)�tokenc@seZdZdZdd�ZdS)�
ParseErrorz(Exception to signal the parser is stuck.cCs4tj|d||||f�||_||_||_||_dS)Nz!%s: type=%r, value=%r, context=%r)�	Exception�__init__�msg�type�value�context)�selfrrrr	�r�+/usr/lib64/python3.6/lib2to3/pgen2/parse.pyrszParseError.__init__N)�__name__�
__module__�__qualname__�__doc__rrrrrrsrc@sLeZdZdZddd�Zddd�Zdd�Zd	d
�Zdd�Zd
d�Z	dd�Z
dS)�Parsera5Parser engine.

    The proper usage sequence is:

    p = Parser(grammar, [converter])  # create instance
    p.setup([start])                  # prepare for parsing
    <for each input token>:
        if p.addtoken(...):           # parse a token; may raise ParseError
            break
    root = p.rootnode                 # root of abstract syntax tree

    A Parser instance may be reused by calling setup() repeatedly.

    A Parser instance contains state pertaining to the current token
    sequence, and should not be used concurrently by different threads
    to parse separate token sequences.

    See driver.py for how to get input tokens by tokenizing a file or
    string.

    Parsing is complete when addtoken() returns True; the root of the
    abstract syntax tree can then be retrieved from the rootnode
    instance variable.  When a syntax error occurs, addtoken() raises
    the ParseError exception.  There is no error recovery; the parser
    cannot be used after a syntax error was reported (but it can be
    reinitialized by calling setup()).

    NcCs||_|pdd�|_dS)a�Constructor.

        The grammar argument is a grammar.Grammar instance; see the
        grammar module for more information.

        The parser is not ready yet for parsing; you must call the
        setup() method to get it started.

        The optional convert argument is a function mapping concrete
        syntax tree nodes to abstract syntax tree nodes.  If not
        given, no conversion is done and the syntax tree produced is
        the concrete syntax tree.  If given, it must be a function of
        two arguments, the first being the grammar (a grammar.Grammar
        instance), and the second being the concrete syntax tree node
        to be converted.  The syntax tree is converted from the bottom
        up.

        A concrete syntax tree node is a (type, value, context, nodes)
        tuple, where type is the node type (a token or symbol number),
        value is None for symbols and a string for tokens, context is
        None or an opaque value used for error reporting (typically a
        (lineno, offset) pair), and nodes is a list of children for
        symbols, and None for tokens.

        An abstract syntax tree node may be anything; this is entirely
        up to the converter function.

        cSs|S)Nr)�grammar�noderrr�<lambda>Wsz!Parser.__init__.<locals>.<lambda>N)r�convert)r
rrrrrr9szParser.__init__cCsH|dkr|jj}|ddgf}|jj|d|f}|g|_d|_t�|_dS)a�Prepare for parsing.

        This *must* be called before starting to parse.

        The optional argument is an alternative start symbol; it
        defaults to the grammar's start symbol.

        You can use a Parser instance to parse any number of programs;
        each time you call setup() the parser is reset to an initial
        state determined by the (implicit or explicit) start symbol.

        N�)r�start�dfas�stack�rootnode�set�
used_names)r
r�newnodeZ
stackentryrrr�setupYs
zParser.setupcCsF|j|||�}�x0|jd	\}}}|\}}	||}
�x|
D]�\}}|jj|\}
}||kr�|
dksft�|j||||�|}x@||d|fgkr�|j�|js�dS|jd
\}}}|\}}	q|WdS|
dkr:|jj|
}|\}}||kr:|j|
|jj|
||�Pq:Wd|f|
k�r0|j�|j�s>t	d|||��qt	d|||��qWdS)z<Add a token; return True iff this is the end of the program.r�rTFztoo much inputz	bad inputN���r )
�classifyrrZlabels�AssertionError�shift�popr�pushr)r
rrr	�ilabel�dfa�staterZstates�firstZarcs�i�newstate�t�vZitsdfaZ	itsstatesZitsfirstrrr�addtokenqs<zParser.addtokencCsX|tjkr0|jj|�|jjj|�}|dk	r0|S|jjj|�}|dkrTtd|||��|S)z&Turn a token into a label.  (Internal)Nz	bad token)	r�NAMEr�addr�keywords�get�tokensr)r
rrr	r&rrrr!�s
zParser.classifyc	CsT|jd\}}}|||df}|j|j|�}|dk	r@|dj|�|||f|jd<dS)zShift a token.  (Internal)rNr r r )rrr�append)	r
rrr+r	r'r(rrrrrr#�szParser.shiftc	CsB|jd\}}}|d|gf}|||f|jd<|jj|d|f�dS)zPush a nonterminal.  (Internal)rNrr r )rr4)	r
rZnewdfar+r	r'r(rrrrrr%�szParser.pushcCs`|jj�\}}}|j|j|�}|dk	r\|jrL|jd\}}}|dj|�n||_|j|j_dS)zPop a nonterminal.  (Internal)Nrr r )rr$rrr4rr)r
ZpopdfaZpopstateZpopnoderr'r(rrrrr$�sz
Parser.pop)N)N)r
rrrrrr.r!r#r%r$rrrrrs
 
0	rN)r�rrr�objectrrrrr�<module>s__pycache__/__init__.cpython-36.pyc000064400000000237150467362370013160 0ustar003


 \��@sdZdS)zThe pgen2 package.N)�__doc__�rr�./usr/lib64/python3.6/lib2to3/pgen2/__init__.py�<module>s__pycache__/__init__.cpython-36.opt-1.pyc000064400000000237150467362370014117 0ustar003


 \��@sdZdS)zThe pgen2 package.N)�__doc__�rr�./usr/lib64/python3.6/lib2to3/pgen2/__init__.py�<module>s__pycache__/conv.cpython-36.opt-1.pyc000064400000014004150467362370013322 0ustar003


 \�%�@s2dZddlZddlmZmZGdd�dej�ZdS)a�Convert graminit.[ch] spit out by pgen to Python code.

Pgen is the Python parser generator.  It is useful to quickly create a
parser from a grammar file in Python's grammar notation.  But I don't
want my parsers to be written in C (yet), so I'm translating the
parsing tables to Python data structures and writing a Python parse
engine.

Note that the token numbers are constants determined by the standard
Python tokenizer.  The standard token module defines these numbers and
their names (the names are not used much).  The token numbers are
hardcoded into the Python tokenizer and into pgen.  A Python
implementation of the Python tokenizer is also available, in the
standard tokenize module.

On the other hand, symbol numbers (representing the grammar's
non-terminals) are assigned by pgen based on the actual grammar
input.

Note: this module is pretty much obsolete; the pgen module generates
equivalent grammar tables directly from the Grammar.txt input file
without having to invoke the Python pgen C program.

�N)�grammar�tokenc@s0eZdZdZdd�Zdd�Zdd�Zdd	�Zd
S)�	Convertera2Grammar subclass that reads classic pgen output files.

    The run() method reads the tables as produced by the pgen parser
    generator, typically contained in two C files, graminit.h and
    graminit.c.  The other methods are for internal use only.

    See the base class for more documentation.

[conv.cpython-36.pyc: compiled CPython 3.6 bytecode omitted -- binary data is
not reproducible as text.  The embedded docstrings are the only other
recoverable content; they document the remaining Converter methods:
  run(graminit_h, graminit_c) -- "Load the grammar tables from the text files
      written by pgen."
  parse_graminit_h(filename) -- parses the .h file written by pgen, a
      sequence of #define statements defining the nonterminals of the grammar
      as numbers, and builds the two tables mapping numbers to names and back.
  parse_graminit_c(filename) -- parses the .c file written by pgen: after the
      two #include lines come one or more state definitions (arc arrays
      "static arc arcs_<n>_<m>[<k>] = { {<i>, <j>}, ... };" followed by a
      state array "static state states_<s>[<t>] = { {<k>, arcs_<n>_<m>},
      ... };"), a table defining dfas, a table defining labels, and a struct
      defining the grammar.
  finish_off() -- "Create additional useful structures. (Internal)"]
__pycache__/literals.cpython-36.opt-2.pyc000064400000002365150467362370014204 0ustar00[compiled CPython 3.6 bytecode of literals.py omitted -- binary data is not reproducible as text]
tokenize.py000064400000051077150467362370006775 0ustar00# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.

"""Tokenization help for Python programs.

generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens.  It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF).  It generates
5-tuples with these members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.

Older entry points
    tokenize_loop(readline, tokeneater)
    tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""

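# A minimal usage sketch (illustrative only, not part of the original module):
# feed generate_tokens(), defined below, a readline callable -- here one made
# from io.StringIO -- and it yields the 5-tuples described above.
#
#   import io
#   from lib2to3.pgen2 import tokenize
#   for type, string, start, end, line in tokenize.generate_tokens(
#           io.StringIO("x = 1\n").readline):
#       print(tokenize.tok_name[type], repr(string), start, end)
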
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'

import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *

from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
           "generate_tokens", "untokenize"]
del token

try:
    bytes
except NameError:
    # Support bytes type in Python <= 2.5, so 2to3 turns itself into
    # valid Python 3 code.
    bytes = str

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
def _combinations(*l):
    return set(
        x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()
    )

Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Binnumber = r'0[bB]_?[01]+(?:_[01]+)*'
Hexnumber = r'0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?'
Octnumber = r'0[oO]?_?[0-7]+(?:_[0-7]+)*[lL]?'
Decnumber = group(r'[1-9]\d*(?:_\d+)*[lL]?', '0[lL]?')
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+(?:_\d+)*'
Pointfloat = group(r'\d+(?:_\d+)*\.(?:\d+(?:_\d+)*)?', r'\.\d+(?:_\d+)*') + maybe(Exponent)
Expfloat = r'\d+(?:_\d+)*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+(?:_\d+)*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
_litprefix = r"(?:[uUrRbBfF]|[rR][fFbB]|[fFbBuU][rR])?"
Triple = group(_litprefix + "'''", _litprefix + '"""')
# Single-line ' or " string.
String = group(_litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               _litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r':=', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(_litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                _litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))

_strprefixes = (
    _combinations('r', 'R', 'f', 'F') |
    _combinations('r', 'R', 'b', 'B') |
    {'u', 'U', 'ur', 'uR', 'Ur', 'UR'}
)

endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            **{f"{prefix}'''": single3prog for prefix in _strprefixes},
            **{f'{prefix}"""': double3prog for prefix in _strprefixes},
            **{prefix: None for prefix in _strprefixes}}

triple_quoted = (
    {"'''", '"""'} |
    {f"{prefix}'''" for prefix in _strprefixes} |
    {f'{prefix}"""' for prefix in _strprefixes}
)
single_quoted = (
    {"'", '"'} |
    {f"{prefix}'" for prefix in _strprefixes} |
    {f'{prefix}"' for prefix in _strprefixes}
)

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass

def printtoken(type, token, start, end, line): # for testing
    (srow, scol) = start
    (erow, ecol) = end
    print("%d,%d-%d,%d:\t%s\t%s" % \
        (srow, scol, erow, ecol, tok_name[type], repr(token)))

def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        pass

# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    for token_info in generate_tokens(readline):
        tokeneater(*token_info)

class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0

    def add_whitespace(self, start):
        row, col = start
        assert row <= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        for t in iterable:
            if len(t) == 2:
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token
        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        for tok in iterable:
            toknum, tokval = tok[:2]

            if toknum in (NAME, NUMBER, ASYNC, AWAIT):
                tokval += ' '

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)

cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)

def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read
    in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present, but
    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
    charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return bytes()

    def find_cookie(line):
        try:
            line_string = line.decode('ascii')
        except UnicodeDecodeError:
            return None
        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            raise SyntaxError("unknown encoding: " + encoding)

        if bom_found:
            if codec.name != 'utf-8':
                # This behaviour mimics the Python interpreter
                raise SyntaxError('encoding problem: utf-8')
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]

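# A minimal usage sketch (illustrative only, not part of the original module):
# detect_encoding() consumes at most two lines from a bytes-producing
# readline, here supplied by io.BytesIO.
#
#   import io
#   src = b"# -*- coding: iso-8859-1 -*-\nx = 1\n"
#   encoding, lines = detect_encoding(io.BytesIO(src).readline)
#   # encoding == 'iso-8859-1'; lines holds the raw line(s) already consumed
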
def untokenize(iterable):
    """Transform tokens back into Python source code.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only those two elements are passed, position information is lost and
    the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).__next__
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    return ut.untokenize(iterable)

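# A runnable variant of the docstring's round-trip example (illustrative only,
# not part of the original module), with io.StringIO standing in for the open
# file f assumed there.
#
#   import io
#   source = "x = 1\n"
#   t1 = [tok[:2] for tok in generate_tokens(io.StringIO(source).readline)]
#   readline = iter(untokenize(t1).splitlines(1)).__next__
#   t2 = [tok[:2] for tok in generate_tokens(readline)]
#   assert t1 == t2
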
def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.  Alternatively, readline
    can be a callable object terminating with StopIteration:
        readline = open(myfile).__next__    # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    physical line.
    """
    lnum = parenlev = continued = 0
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    # 'stashed' and 'async_*' are used for async/await parsing
    stashed = None
    async_def = False
    async_def_indent = 0
    async_def_nl = False

    while 1:                                   # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = ''
        lnum = lnum + 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield (ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ': column = column + 1
                elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f': column = 0
                else: break
                pos = pos + 1
            if pos == max: break

            if stashed:
                yield stashed
                stashed = None

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield (COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield (NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]

                if async_def and async_def_indent >= indents[-1]:
                    async_def = False
                    async_def_nl = False
                    async_def_indent = 0

                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)

            if async_def and async_def_nl and async_def_indent >= indents[-1]:
                async_def = False
                async_def_nl = False
                async_def_indent = 0

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]

                if initial in string.digits or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    newline = NEWLINE
                    if parenlev > 0:
                        newline = NL
                    elif async_def:
                        async_def_nl = True
                    if stashed:
                        yield stashed
                        stashed = None
                    yield (newline, token, spos, epos, line)

                elif initial == '#':
                    assert not token.endswith("\n")
                    if stashed:
                        yield stashed
                        stashed = None
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        if stashed:
                            yield stashed
                            stashed = None
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        if stashed:
                            yield stashed
                            stashed = None
                        yield (STRING, token, spos, epos, line)
                elif initial.isidentifier():               # ordinary name
                    if token in ('async', 'await'):
                        if async_def:
                            yield (ASYNC if token == 'async' else AWAIT,
                                   token, spos, epos, line)
                            continue

                    tok = (NAME, token, spos, epos, line)
                    if token == 'async' and not stashed:
                        stashed = tok
                        continue

                    if token == 'def':
                        if (stashed
                                and stashed[0] == NAME
                                and stashed[1] == 'async'):

                            async_def = True
                            async_def_indent = indents[-1]

                            yield (ASYNC, stashed[1],
                                   stashed[2], stashed[3],
                                   stashed[4])
                            stashed = None

                    if stashed:
                        yield stashed
                        stashed = None

                    yield tok
                elif initial == '\\':                      # continued stmt
                    # This yield is new; needed for better idempotency:
                    if stashed:
                        yield stashed
                        stashed = None
                    yield (NL, token, spos, (lnum, pos), line)
                    continued = 1
                else:
                    if initial in '([{': parenlev = parenlev + 1
                    elif initial in ')]}': parenlev = parenlev - 1
                    if stashed:
                        yield stashed
                        stashed = None
                    yield (OP, token, spos, epos, line)
            else:
                yield (ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos = pos + 1

    if stashed:
        yield stashed
        stashed = None

    for indent in indents[1:]:                 # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')

if __name__ == '__main__':                     # testing
    import sys
    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    else: tokenize(sys.stdin.readline)
literals.py000064400000003143150467362370006753 0ustar00# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Safely evaluate Python string literals without using eval()."""

import re

simple_escapes = {"a": "\a",
                  "b": "\b",
                  "f": "\f",
                  "n": "\n",
                  "r": "\r",
                  "t": "\t",
                  "v": "\v",
                  "'": "'",
                  '"': '"',
                  "\\": "\\"}

def escape(m):
    all, tail = m.group(0, 1)
    assert all.startswith("\\")
    esc = simple_escapes.get(tail)
    if esc is not None:
        return esc
    if tail.startswith("x"):
        hexes = tail[1:]
        if len(hexes) < 2:
            raise ValueError("invalid hex string escape ('\\%s')" % tail)
        try:
            i = int(hexes, 16)
        except ValueError:
            raise ValueError("invalid hex string escape ('\\%s')" % tail) from None
    else:
        try:
            i = int(tail, 8)
        except ValueError:
            raise ValueError("invalid octal string escape ('\\%s')" % tail) from None
    return chr(i)

def evalString(s):
    assert s.startswith("'") or s.startswith('"'), repr(s[:1])
    q = s[0]
    if s[:3] == q*3:
        q = q*3
    assert s.endswith(q), repr(s[-len(q):])
    assert len(s) >= 2*len(q)
    s = s[len(q):-len(q)]
    return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s)

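# Examples (illustrative only, not part of the original module):
#
#   evalString(r"'a\tb'") == "a\tb"    # simple escape
#   evalString(r"'\x41'") == "A"       # hex escape
#   evalString(r"'\101'") == "A"       # octal escape
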
def test():
    for i in range(256):
        c = chr(i)
        s = repr(c)
        e = evalString(s)
        if e != c:
            print(i, c, s, e)


if __name__ == "__main__":
    test()
pgen.py000064400000032764150467362370006100 0ustar00# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

# Pgen imports
from . import grammar, token, tokenize

class PgenGrammar(grammar.Grammar):
    pass

class ParserGenerator(object):

    def __init__(self, filename, stream=None):
        close_stream = None
        if stream is None:
            stream = open(filename)
            close_stream = stream.close
        self.filename = filename
        self.stream = stream
        self.generator = tokenize.generate_tokens(stream.readline)
        self.gettoken() # Initialize lookahead
        self.dfas, self.startsymbol = self.parse()
        if close_stream is not None:
            close_stream()
        self.first = {} # map from symbol name to set of tokens
        self.addfirstsets()

    def make_grammar(self):
        c = PgenGrammar()
        names = list(self.dfas.keys())
        names.sort()
        names.remove(self.startsymbol)
        names.insert(0, self.startsymbol)
        for name in names:
            i = 256 + len(c.symbol2number)
            c.symbol2number[name] = i
            c.number2symbol[i] = name
        for name in names:
            dfa = self.dfas[name]
            states = []
            for state in dfa:
                arcs = []
                for label, next in sorted(state.arcs.items()):
                    arcs.append((self.make_label(c, label), dfa.index(next)))
                if state.isfinal:
                    arcs.append((0, dfa.index(state)))
                states.append(arcs)
            c.states.append(states)
            c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
        c.start = c.symbol2number[self.startsymbol]
        return c

    def make_first(self, c, name):
        rawfirst = self.first[name]
        first = {}
        for label in sorted(rawfirst):
            ilabel = self.make_label(c, label)
            ##assert ilabel not in first # XXX failed on <> ... !=
            first[ilabel] = 1
        return first

    def make_label(self, c, label):
        # XXX Maybe this should be a method on a subclass of converter?
        ilabel = len(c.labels)
        if label[0].isalpha():
            # Either a symbol name or a named token
            if label in c.symbol2number:
                # A symbol name (a non-terminal)
                if label in c.symbol2label:
                    return c.symbol2label[label]
                else:
                    c.labels.append((c.symbol2number[label], None))
                    c.symbol2label[label] = ilabel
                    return ilabel
            else:
                # A named token (NAME, NUMBER, STRING)
                itoken = getattr(token, label, None)
                assert isinstance(itoken, int), label
                assert itoken in token.tok_name, label
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel
        else:
            # Either a keyword or an operator
            assert label[0] in ('"', "'"), label
            value = eval(label)
            if value[0].isalpha():
                # A keyword
                if value in c.keywords:
                    return c.keywords[value]
                else:
                    c.labels.append((token.NAME, value))
                    c.keywords[value] = ilabel
                    return ilabel
            else:
                # An operator (any non-numeric token)
                itoken = grammar.opmap[value] # Fails if unknown token
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel

    def addfirstsets(self):
        names = list(self.dfas.keys())
        names.sort()
        for name in names:
            if name not in self.first:
                self.calcfirst(name)
            #print name, self.first[name].keys()

    def calcfirst(self, name):
        dfa = self.dfas[name]
        self.first[name] = None # dummy to detect left recursion
        state = dfa[0]
        totalset = {}
        overlapcheck = {}
        for label, next in state.arcs.items():
            if label in self.dfas:
                if label in self.first:
                    fset = self.first[label]
                    if fset is None:
                        raise ValueError("recursion for rule %r" % name)
                else:
                    self.calcfirst(label)
                    fset = self.first[label]
                totalset.update(fset)
                overlapcheck[label] = fset
            else:
                totalset[label] = 1
                overlapcheck[label] = {label: 1}
        inverse = {}
        for label, itsfirst in overlapcheck.items():
            for symbol in itsfirst:
                if symbol in inverse:
                    raise ValueError("rule %s is ambiguous; %s is in the"
                                     " first sets of %s as well as %s" %
                                     (name, symbol, label, inverse[symbol]))
                inverse[symbol] = label
        self.first[name] = totalset

    def parse(self):
        dfas = {}
        startsymbol = None
        # MSTART: (NEWLINE | RULE)* ENDMARKER
        while self.type != token.ENDMARKER:
            while self.type == token.NEWLINE:
                self.gettoken()
            # RULE: NAME ':' RHS NEWLINE
            name = self.expect(token.NAME)
            self.expect(token.OP, ":")
            a, z = self.parse_rhs()
            self.expect(token.NEWLINE)
            #self.dump_nfa(name, a, z)
            dfa = self.make_dfa(a, z)
            #self.dump_dfa(name, dfa)
            oldlen = len(dfa)
            self.simplify_dfa(dfa)
            newlen = len(dfa)
            dfas[name] = dfa
            #print name, oldlen, newlen
            if startsymbol is None:
                startsymbol = name
        return dfas, startsymbol
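
    # Example input (illustrative, not from the original source): the grammar
    # files accepted by parse() are sequences of rules such as
    #
    #     file_input: (NEWLINE | stmt)* ENDMARKER
    #     stmt: NAME '=' NAME NEWLINE
    #
    # The first rule encountered becomes the grammar's start symbol.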

    def make_dfa(self, start, finish):
        # To turn an NFA into a DFA, we define the states of the DFA
        # to correspond to *sets* of states of the NFA.  Then do some
        # state reduction.  Let's represent sets as dicts with 1 for
        # values.
        assert isinstance(start, NFAState)
        assert isinstance(finish, NFAState)
        def closure(state):
            base = {}
            addclosure(state, base)
            return base
        def addclosure(state, base):
            assert isinstance(state, NFAState)
            if state in base:
                return
            base[state] = 1
            for label, next in state.arcs:
                if label is None:
                    addclosure(next, base)
        states = [DFAState(closure(start), finish)]
        for state in states: # NB states grows while we're iterating
            arcs = {}
            for nfastate in state.nfaset:
                for label, next in nfastate.arcs:
                    if label is not None:
                        addclosure(next, arcs.setdefault(label, {}))
            for label, nfaset in sorted(arcs.items()):
                for st in states:
                    if st.nfaset == nfaset:
                        break
                else:
                    st = DFAState(nfaset, finish)
                    states.append(st)
                state.addarc(st, label)
        return states # List of DFAState instances; first one is start

    def dump_nfa(self, name, start, finish):
        print("Dump of NFA for", name)
        todo = [start]
        for i, state in enumerate(todo):
            print("  State", i, state is finish and "(final)" or "")
            for label, next in state.arcs:
                if next in todo:
                    j = todo.index(next)
                else:
                    j = len(todo)
                    todo.append(next)
                if label is None:
                    print("    -> %d" % j)
                else:
                    print("    %s -> %d" % (label, j))

    def dump_dfa(self, name, dfa):
        print("Dump of DFA for", name)
        for i, state in enumerate(dfa):
            print("  State", i, state.isfinal and "(final)" or "")
            for label, next in sorted(state.arcs.items()):
                print("    %s -> %d" % (label, dfa.index(next)))

    def simplify_dfa(self, dfa):
        # This is not theoretically optimal, but works well enough.
        # Algorithm: repeatedly look for two states that have the same
        # set of arcs (same labels pointing to the same nodes) and
        # unify them, until things stop changing.

        # dfa is a list of DFAState instances
        changes = True
        while changes:
            changes = False
            for i, state_i in enumerate(dfa):
                for j in range(i+1, len(dfa)):
                    state_j = dfa[j]
                    if state_i == state_j:
                        #print "  unify", i, j
                        del dfa[j]
                        for state in dfa:
                            state.unifystate(state_j, state_i)
                        changes = True
                        break

    def parse_rhs(self):
        # RHS: ALT ('|' ALT)*
        a, z = self.parse_alt()
        if self.value != "|":
            return a, z
        else:
            aa = NFAState()
            zz = NFAState()
            aa.addarc(a)
            z.addarc(zz)
            while self.value == "|":
                self.gettoken()
                a, z = self.parse_alt()
                aa.addarc(a)
                z.addarc(zz)
            return aa, zz

    def parse_alt(self):
        # ALT: ITEM+
        a, b = self.parse_item()
        while (self.value in ("(", "[") or
               self.type in (token.NAME, token.STRING)):
            c, d = self.parse_item()
            b.addarc(c)
            b = d
        return a, b

    def parse_item(self):
        # ITEM: '[' RHS ']' | ATOM ['+' | '*']
        if self.value == "[":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, "]")
            a.addarc(z)
            return a, z
        else:
            a, z = self.parse_atom()
            value = self.value
            if value not in ("+", "*"):
                return a, z
            self.gettoken()
            z.addarc(a)
            if value == "+":
                return a, z
            else:
                return a, a

    def parse_atom(self):
        # ATOM: '(' RHS ')' | NAME | STRING
        if self.value == "(":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, ")")
            return a, z
        elif self.type in (token.NAME, token.STRING):
            a = NFAState()
            z = NFAState()
            a.addarc(z, self.value)
            self.gettoken()
            return a, z
        else:
            self.raise_error("expected (...) or NAME or STRING, got %s/%s",
                             self.type, self.value)

    def expect(self, type, value=None):
        if self.type != type or (value is not None and self.value != value):
            self.raise_error("expected %s/%s, got %s/%s",
                             type, value, self.type, self.value)
        value = self.value
        self.gettoken()
        return value

    def gettoken(self):
        tup = next(self.generator)
        while tup[0] in (tokenize.COMMENT, tokenize.NL):
            tup = next(self.generator)
        self.type, self.value, self.begin, self.end, self.line = tup
        #print token.tok_name[self.type], repr(self.value)

    def raise_error(self, msg, *args):
        if args:
            try:
                msg = msg % args
            except:
                msg = " ".join([msg] + list(map(str, args)))
        raise SyntaxError(msg, (self.filename, self.end[0],
                                self.end[1], self.line))

class NFAState(object):

    def __init__(self):
        self.arcs = [] # list of (label, NFAState) pairs

    def addarc(self, next, label=None):
        assert label is None or isinstance(label, str)
        assert isinstance(next, NFAState)
        self.arcs.append((label, next))

class DFAState(object):

    def __init__(self, nfaset, final):
        assert isinstance(nfaset, dict)
        assert isinstance(next(iter(nfaset)), NFAState)
        assert isinstance(final, NFAState)
        self.nfaset = nfaset
        self.isfinal = final in nfaset
        self.arcs = {} # map from label to DFAState

    def addarc(self, next, label):
        assert isinstance(label, str)
        assert label not in self.arcs
        assert isinstance(next, DFAState)
        self.arcs[label] = next

    def unifystate(self, old, new):
        for label, next in self.arcs.items():
            if next is old:
                self.arcs[label] = new

    def __eq__(self, other):
        # Equality test -- ignore the nfaset instance variable
        assert isinstance(other, DFAState)
        if self.isfinal != other.isfinal:
            return False
        # Can't just return self.arcs == other.arcs, because that
        # would invoke this method recursively, with cycles...
        if len(self.arcs) != len(other.arcs):
            return False
        for label, next in self.arcs.items():
            if next is not other.arcs.get(label):
                return False
        return True

    __hash__ = None # For Py3 compatibility.

def generate_grammar(filename="Grammar.txt"):
    p = ParserGenerator(filename)
    return p.make_grammar()
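
# A minimal usage sketch (illustrative only, not part of the original module):
# build the parse tables from a grammar file and pickle them for later reuse
# via grammar.Grammar.load().  The file names are assumptions.
#
#   from lib2to3.pgen2 import pgen
#   g = pgen.generate_grammar("Grammar.txt")
#   g.dump("Grammar.pickle")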
driver.py000064400000013521150467362370006430 0ustar00# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Parser driver.

This provides a high-level interface to parse a file into a syntax tree.

"""

__author__ = "Guido van Rossum <guido@python.org>"

__all__ = ["Driver", "load_grammar"]

# Python imports
import io
import os
import logging
import pkgutil
import sys

# Pgen imports
from . import grammar, parse, token, tokenize, pgen


class Driver(object):

    def __init__(self, grammar, convert=None, logger=None):
        self.grammar = grammar
        if logger is None:
            logger = logging.getLogger()
        self.logger = logger
        self.convert = convert

    def parse_tokens(self, tokens, debug=False):
        """Parse a series of tokens and return the syntax tree."""
        # XXX Move the prefix computation into a wrapper around tokenize.
        p = parse.Parser(self.grammar, self.convert)
        p.setup()
        lineno = 1
        column = 0
        type = value = start = end = line_text = None
        prefix = ""
        for quintuple in tokens:
            type, value, start, end, line_text = quintuple
            if start != (lineno, column):
                assert (lineno, column) <= start, ((lineno, column), start)
                s_lineno, s_column = start
                if lineno < s_lineno:
                    prefix += "\n" * (s_lineno - lineno)
                    lineno = s_lineno
                    column = 0
                if column < s_column:
                    prefix += line_text[column:s_column]
                    column = s_column
            if type in (tokenize.COMMENT, tokenize.NL):
                prefix += value
                lineno, column = end
                if value.endswith("\n"):
                    lineno += 1
                    column = 0
                continue
            if type == token.OP:
                type = grammar.opmap[value]
            if debug:
                self.logger.debug("%s %r (prefix=%r)",
                                  token.tok_name[type], value, prefix)
            if p.addtoken(type, value, (prefix, start)):
                if debug:
                    self.logger.debug("Stop.")
                break
            prefix = ""
            lineno, column = end
            if value.endswith("\n"):
                lineno += 1
                column = 0
        else:
            # We never broke out -- EOF is too soon (how can this happen???)
            raise parse.ParseError("incomplete input",
                                   type, value, (prefix, start))
        return p.rootnode

    def parse_stream_raw(self, stream, debug=False):
        """Parse a stream and return the syntax tree."""
        tokens = tokenize.generate_tokens(stream.readline)
        return self.parse_tokens(tokens, debug)

    def parse_stream(self, stream, debug=False):
        """Parse a stream and return the syntax tree."""
        return self.parse_stream_raw(stream, debug)

    def parse_file(self, filename, encoding=None, debug=False):
        """Parse a file and return the syntax tree."""
        with io.open(filename, "r", encoding=encoding) as stream:
            return self.parse_stream(stream, debug)

    def parse_string(self, text, debug=False):
        """Parse a string and return the syntax tree."""
        tokens = tokenize.generate_tokens(io.StringIO(text).readline)
        return self.parse_tokens(tokens, debug)

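# A minimal usage sketch (illustrative only, not part of the original module):
# parse a string into a lib2to3 syntax tree, assuming load_grammar() below can
# find or regenerate its grammar tables (by default a Grammar.txt in the
# current directory).
#
#   d = Driver(load_grammar(), convert=None)
#   tree = d.parse_string("x = 1\n")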

def _generate_pickle_name(gt):
    head, tail = os.path.splitext(gt)
    if tail == ".txt":
        tail = ""
    return head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
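# For example (illustrative; the suffix depends on the running interpreter),
# under CPython 3.6.8 the function above yields:
#   _generate_pickle_name("Grammar.txt") == "Grammar3.6.8.final.0.pickle"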


def load_grammar(gt="Grammar.txt", gp=None,
                 save=True, force=False, logger=None):
    """Load the grammar (maybe from a pickle)."""
    if logger is None:
        logger = logging.getLogger()
    gp = _generate_pickle_name(gt) if gp is None else gp
    if force or not _newer(gp, gt):
        logger.info("Generating grammar tables from %s", gt)
        g = pgen.generate_grammar(gt)
        if save:
            logger.info("Writing grammar tables to %s", gp)
            try:
                g.dump(gp)
            except OSError as e:
                logger.info("Writing failed: %s", e)
    else:
        g = grammar.Grammar()
        g.load(gp)
    return g


def _newer(a, b):
    """Inquire whether file a was written since file b."""
    if not os.path.exists(a):
        return False
    if not os.path.exists(b):
        return True
    return os.path.getmtime(a) >= os.path.getmtime(b)


def load_packaged_grammar(package, grammar_source):
    """Normally, loads a pickled grammar by doing
        pkgutil.get_data(package, pickled_grammar)
    where *pickled_grammar* is computed from *grammar_source* by adding the
    Python version and using a ``.pickle`` extension.

    However, if *grammar_source* is an extant file, load_grammar(grammar_source)
    is called instead. This facilitates using a packaged grammar file when needed
    but preserves load_grammar's automatic regeneration behavior when possible.

    """
    if os.path.isfile(grammar_source):
        return load_grammar(grammar_source)
    pickled_name = _generate_pickle_name(os.path.basename(grammar_source))
    data = pkgutil.get_data(package, pickled_name)
    g = grammar.Grammar()
    g.loads(data)
    return g


def main(*args):
    """Main program, when run as a script: produce grammar pickle files.

    Calls load_grammar for each argument, a path to a grammar text file.
    """
    if not args:
        args = sys.argv[1:]
    logging.basicConfig(level=logging.INFO, stream=sys.stdout,
                        format='%(message)s')
    for gt in args:
        load_grammar(gt, save=True, force=True)
    return True

if __name__ == "__main__":
    sys.exit(int(not main()))
grammar.py000064400000012635150467362370006570 0ustar00# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""This module defines the data structures used to represent a grammar.

These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.

There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.

"""

# Python imports
import pickle

# Local imports
from . import token


class Grammar(object):
    """Pgen parsing tables conversion class.

    Once initialized, this class supplies the grammar tables for the
    parsing engine implemented by parse.py.  The parsing engine
    accesses the instance variables directly.  The class here does not
    provide initialization of the tables; several subclasses exist to
    do this (see the conv and pgen modules).

    The load() method reads the tables from a pickle file, which is
    much faster than the other ways offered by subclasses.  The pickle
    file is written by calling dump() (after loading the grammar
    tables using a subclass).  The report() method prints a readable
    representation of the tables to stdout, for debugging.

    The instance variables are as follows:

    symbol2number -- a dict mapping symbol names to numbers.  Symbol
                     numbers are always 256 or higher, to distinguish
                     them from token numbers, which are between 0 and
                     255 (inclusive).

    number2symbol -- a dict mapping numbers to symbol names;
                     these two are each other's inverse.

    states        -- a list of DFAs, where each DFA is a list of
                     states, each state is a list of arcs, and each
                     arc is an (i, j) pair where i is a label and j is
                     a state number.  The DFA number is the index into
                     this list.  (This name is slightly confusing.)
                     Final states are represented by a special arc of
                     the form (0, j) where j is its own state number.

    dfas          -- a dict mapping symbol numbers to (DFA, first)
                     pairs, where DFA is an item from the states list
                     above, and first is a set of tokens that can
                     begin this grammar rule (represented by a dict
                     whose values are always 1).

    labels        -- a list of (x, y) pairs where x is either a token
                     number or a symbol number, and y is either None
                     or a string; the strings are keywords.  The label
                     number is the index in this list; label numbers
                     are used to mark state transitions (arcs) in the
                     DFAs.

    start         -- the number of the grammar's start symbol.

    keywords      -- a dict mapping keyword strings to arc labels.

    tokens        -- a dict mapping token numbers to arc labels.

    """

    def __init__(self):
        self.symbol2number = {}
        self.number2symbol = {}
        self.states = []
        self.dfas = {}
        self.labels = [(0, "EMPTY")]
        self.keywords = {}
        self.tokens = {}
        self.symbol2label = {}
        self.start = 256

    def dump(self, filename):
        """Dump the grammar tables to a pickle file."""
        with open(filename, "wb") as f:
            pickle.dump(self.__dict__, f, pickle.HIGHEST_PROTOCOL)

    def load(self, filename):
        """Load the grammar tables from a pickle file."""
        with open(filename, "rb") as f:
            d = pickle.load(f)
        self.__dict__.update(d)

    def loads(self, pkl):
        """Load the grammar tables from a pickle bytes object."""
        self.__dict__.update(pickle.loads(pkl))

    def copy(self):
        """
        Copy the grammar.
        """
        new = self.__class__()
        for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords",
                          "tokens", "symbol2label"):
            setattr(new, dict_attr, getattr(self, dict_attr).copy())
        new.labels = self.labels[:]
        new.states = self.states[:]
        new.start = self.start
        return new

    def report(self):
        """Dump the grammar tables to standard output, for debugging."""
        from pprint import pprint
        print("s2n")
        pprint(self.symbol2number)
        print("n2s")
        pprint(self.number2symbol)
        print("states")
        pprint(self.states)
        print("dfas")
        pprint(self.dfas)
        print("labels")
        pprint(self.labels)
        print("start", self.start)

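# A minimal sketch (illustrative only, not part of the original module) of the
# pickle round-trip implemented by dump() and load() above:
#
#   g = Grammar()
#   g.dump("tables.pickle")    # pickles g.__dict__
#   h = Grammar()
#   h.load("tables.pickle")    # h now carries the same tables as g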

# Map from operator to number (since tokenize doesn't do this)

opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
@= ATEQUAL
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
:= COLONEQUAL
"""

opmap = {}
for line in opmap_raw.splitlines():
    if line:
        op, name = line.split()
        opmap[op] = getattr(token, name)
token.py000075500000002424150467362370006260 0ustar00#! /usr/bin/python3.8

"""Token constants (from "token.h")."""

#  Taken from Python (r53757) and modified to include some tokens
#   originally monkeypatched in by pgen2.tokenize

#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
ATEQUAL = 51
OP = 52
COMMENT = 53
NL = 54
RARROW = 55
AWAIT = 56
ASYNC = 57
ERRORTOKEN = 58
COLONEQUAL = 59
N_TOKENS = 60
NT_OFFSET = 256
#--end constants--

tok_name = {}
for _name, _value in list(globals().items()):
    if type(_value) is int:
        tok_name[_value] = _name


def ISTERMINAL(x):
    return x < NT_OFFSET

def ISNONTERMINAL(x):
    return x >= NT_OFFSET

def ISEOF(x):
    return x == ENDMARKER
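
# Illustrative examples (not part of the original module):
#
#   tok_name[NAME] == 'NAME'
#   ISTERMINAL(NAME)      # True: token numbers lie below NT_OFFSET (256)
#   ISNONTERMINAL(256)    # True: symbol numbers start at NT_OFFSET
#   ISEOF(ENDMARKER)      # True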
pgen.pyc000064400000027716150471355400006233 0ustar00�
{fc@s�ddlmZmZmZdejfd��YZdefd��YZdefd��YZdefd	��YZ	d
d�Z
dS(
i(tgrammarttokenttokenizetPgenGrammarcBseZRS((t__name__t
__module__(((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRstParserGeneratorcBs�eZdd�Zd�Zd�Zd�Zd�Zd�Zd�Z	d�Z
d�Zd	�Zd
�Z
d�Zd�Zd
�Zd�Zdd�Zd�Zd�ZRS(cCs�d}|dkr*t|�}|j}n||_||_tj|j�|_|j	�|j
�\|_|_|dk	r�|�ni|_
|j�dS(N(tNonetopentclosetfilenametstreamRtgenerate_tokenstreadlinet	generatortgettokentparsetdfaststartsymboltfirsttaddfirstsets(tselfR
Rtclose_stream((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyt__init__s		

	cCs�t�}|jj�}|j�|j|j�|jd|j�x;|D]3}dt|j�}||j|<||j	|<qLWx�|D]�}|j|}g}x�|D]�}g}xKt
|jj��D]4\}	}
|j
|j||	�|j|
�f�q�W|jr,|j
d|j|�f�n|j
|�q�W|jj
|�||j||�f|j|j|<q�W|j|j|_|S(Nii(RRtkeystsorttremoveRtinserttlent
symbol2numbert
number2symboltsortedtarcst	iteritemstappendt
make_labeltindextisfinaltstatest
make_firsttstart(RtctnamestnametitdfaR&tstateR tlabeltnext((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pytmake_grammars.	





",	*cCsJ|j|}i}x0t|�D]"}|j||�}d||<q W|S(Ni(RRR#(RR)R+trawfirstRR/tilabel((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR'4s
cCs�t|j�}|dj�r||jkry||jkrH|j|S|jj|j|df�||j|<|Sq�tt|d�}t	|t
tf�s�t|��|tj
ks�t|��||jkr�|j|S|jj|df�||j|<|Sn�|ddks't|��t|�}|dj�r�||jkr]|j|S|jjtj|f�||j|<|SnNtj|}||jkr�|j|S|jj|df�||j|<|SdS(Nit"t'(R4R5(RtlabelstisalphaRtsymbol2labelR"RtgetattrRt
isinstancetinttlongtAssertionErrorttok_namettokenstevaltkeywordstNAMERtopmap(RR)R/R3titokentvalue((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR#=s<
!



cCsM|jj�}|j�x-|D]%}||jkr |j|�q q WdS(N(RRRRt	calcfirst(RR*R+((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRks


c	Csj|j|}d|j|<|d}i}i}x�|jj�D]�\}}||jkr�||jkr�|j|}|dkr�td|��q�n|j|�|j|}|j|�|||<q@d||<id|6||<q@Wi}	xd|j�D]V\}}
xG|
D]?}||	krGtd||||	|f��n||	|<qWq�W||j|<dS(Nisrecursion for rule %risArule %s is ambiguous; %s is in the first sets of %s as well as %s(RRRR R!t
ValueErrorRFtupdate(RR+R-R.ttotalsettoverlapcheckR/R0tfsettinversetitsfirsttsymbol((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRFss2









c	Cs�i}d}x�|jtjkr�x |jtjkrC|j�q$W|jtj�}|jtjd�|j	�\}}|jtj�|j
||�}t|�}|j|�t|�}|||<|dkr|}qqW||fS(Nt:(
RttypeRt	ENDMARKERtNEWLINERtexpectRBtOPt	parse_rhstmake_dfaRtsimplify_dfa(	RRRR+tatzR-toldlentnewlen((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR�s"


csDt|t�st�t|t�s*t��fd�}�fd��t||�|�g}x�|D]�}i}xS|jD]H}x?|jD]4\}}	|dk	r��|	|j|i��q�q�Wq}Wxpt|j	��D]\\}}
x=|D]}|j|
kr�Pq�q�Wt|
|�}|j
|�|j||�q�WqgW|S(Ncsi}�||�|S(N((R.tbase(t
addclosure(s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pytclosure�s
csit|t�st�||kr%dSd||<x3|jD](\}}|dkr9�||�q9q9WdS(Ni(R:tNFAStateR=R R(R.R\R/R0(R](s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR]�s
(R:R_R=tDFAStatetnfasetR Rt
setdefaultRR!R"taddarc(RR(tfinishR^R&R.R tnfastateR/R0Ratst((R]s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRV�s&
$

c
Cs�dG|GH|g}x�t|�D]�\}}dG|G||krEdpHdGHxu|jD]j\}}||kr~|j|�}	nt|�}	|j|�|dkr�d|	GHqTd||	fGHqTWqWdS(NsDump of NFA fors  States(final)ts	    -> %ds    %s -> %d(t	enumerateR R$RR"R(
RR+R(RdttodoR,R.R/R0tj((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pytdump_nfa�s		
cCs�dG|GHxtt|�D]f\}}dG|G|jr9dp<dGHx;t|jj��D]$\}}d||j|�fGHqTWqWdS(NsDump of DFA fors  States(final)Rgs    %s -> %d(RhR%RR R!R$(RR+R-R,R.R/R0((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pytdump_dfa�s
	"cCs�t}x�|r�t}x�t|�D]x\}}xit|dt|��D]N}||}||krH||=x|D]}|j||�qrWt}PqHqHWq"Wq	WdS(Ni(tTruetFalseRhtrangeRt
unifystate(RR-tchangesR,tstate_iRjtstate_jR.((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRW�s	 

cCs�|j�\}}|jdkr+||fSt�}t�}|j|�|j|�xI|jdkr�|j�|j�\}}|j|�|j|�qZW||fSdS(Nt|(t	parse_altRER_RcR(RRXRYtaatzz((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRU�s
		



cCsr|j�\}}xS|jdks?|jtjtjfkrg|j�\}}|j|�|}qW||fS(Nt(t[(RxRy(t
parse_itemRERPRRBtSTRINGRc(RRXtbR)td((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRu
s

cCs�|jdkrU|j�|j�\}}|jtjd�|j|�||fS|j�\}}|j}|dkr�||fS|j�|j|�|dkr�||fS||fSdS(NRyt]t+t*(RR�(RERRURSRRTRct
parse_atom(RRXRYRE((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRzs


	



cCs�|jdkrH|j�|j�\}}|jtjd�||fS|jtjtjfkr�t	�}t	�}|j
||j�|j�||fS|jd|j|j�dS(NRxt)s+expected (...) or NAME or STRING, got %s/%s(RERRURSRRTRPRBR{R_Rctraise_error(RRXRY((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR�(s

		

	cCsc|j|ks*|dk	rL|j|krL|jd|||j|j�n|j}|j�|S(Nsexpected %s/%s, got %s/%s(RPRRER�R(RRPRE((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRS9s*		
cCsi|jj�}x/|dtjtjfkr@|jj�}qW|\|_|_|_|_|_	dS(Ni(
RR0RtCOMMENTtNLRPREtbegintendtline(Rttup((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRAscGss|r@y||}Wq@dj|gtt|��}q@Xnt||j|jd|jd|jf��dS(Nt ii(tjointmaptstrtSyntaxErrorR
R�R�(Rtmsgtargs((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR�Hs&N(RRRRR1R'R#RRFRRVRkRlRWRURuRzR�RSRR�(((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR
s$				.				$					
			R_cBseZd�Zdd�ZRS(cCs
g|_dS(N(R (R((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRSscCsP|dks!t|t�s!t�t|t�s6t�|jj||f�dS(N(RR:R�R=R_R R"(RR0R/((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRcVs!N(RRRRRc(((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR_Qs	R`cBs2eZd�Zd�Zd�Zd�ZdZRS(cCspt|t�st�tt|�j�t�s6t�t|t�sKt�||_||k|_i|_dS(N(	R:tdictR=titerR0R_RaR%R (RRatfinal((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR]s!	cCsPt|t�st�||jks*t�t|t�s?t�||j|<dS(N(R:R�R=R R`(RR0R/((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRcescCs@x9|jj�D](\}}||kr||j|<qqWdS(N(R R!(RtoldtnewR/R0((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRpkscCs�t|t�st�|j|jkr+tSt|j�t|j�krMtSx9|jj�D](\}}||jj|�k	r]tSq]Wt	S(N(
R:R`R=R%RnRR R!tgetRm(RtotherR/R0((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyt__eq__psN(RRRRcRpR�Rt__hash__(((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR`[s
				sGrammar.txtcCst|�}|j�S(N(RR1(R
tp((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pytgenerate_grammar�sN(RgRRRtGrammarRtobjectRR_R`R�(((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyt<module>s�H
pgen.pyo000064400000026551150471355400006243 0ustar00
[compiled CPython 2.7 bytecode for lib2to3/pgen2/pgen.py, optimized (.pyo)
 duplicate of the entry above; binary payload omitted.  Recoverable symbols:
 PgenGrammar; ParserGenerator (make_grammar, make_first, make_label,
 addfirstsets, calcfirst, parse, make_dfa, dump_nfa, dump_dfa, simplify_dfa,
 parse_rhs, parse_alt, parse_item, parse_atom, expect, gettoken,
 raise_error); NFAState; DFAState; generate_grammar(filename="Grammar.txt").]
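A minimal usage sketch for the pgen module above, assuming the standard
Python 2.7 lib2to3 layout; the grammar file path is illustrative, not taken
from this archive:

# Sketch: regenerate grammar tables directly from a grammar description file.
from lib2to3.pgen2 import pgen

# "Grammar.txt" is pgen's documented default; any file written in pgen's
# grammar notation works.  The path here is an assumption for illustration.
g = pgen.generate_grammar("Grammar.txt")
g.report()  # print the symbol/DFA/label tables to stdout for inspection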
conv.pyo000064400000015575150471355400006253 0ustar00
[compiled CPython 2.7 bytecode for lib2to3/pgen2/conv.py; binary payload
 omitted.  The documentation below is recovered intact from the bytecode.]

Module docstring:

    Convert graminit.[ch] spit out by pgen to Python code.

    Pgen is the Python parser generator.  It is useful to quickly create a
    parser from a grammar file in Python's grammar notation.  But I don't
    want my parsers to be written in C (yet), so I'm translating the
    parsing tables to Python data structures and writing a Python parse
    engine.

    Note that the token numbers are constants determined by the standard
    Python tokenizer.  The standard token module defines these numbers and
    their names (the names are not used much).  The token numbers are
    hardcoded into the Python tokenizer and into pgen.  A Python
    implementation of the Python tokenizer is also available, in the
    standard tokenize module.

    On the other hand, symbol numbers (representing the grammar's
    non-terminals) are assigned by pgen based on the actual grammar
    input.

    Note: this module is pretty much obsolete; the pgen module generates
    equivalent grammar tables directly from the Grammar.txt input file
    without having to invoke the Python pgen C program.

Class Converter, a Grammar subclass that reads classic pgen output files:

    The run() method reads the tables as produced by the pgen parser
    generator, typically contained in two C files, graminit.h and
    graminit.c.  The other methods are for internal use only:
    parse_graminit_h() parses the .h file (a sequence of #define
    statements mapping grammar nonterminals to numbers), parse_graminit_c()
    parses the .c file (arc arrays, state arrays, the dfa and label
    tables, and the grammar struct), and finish_off() creates the
    additional keywords and tokens lookup structures.
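A short sketch of the Converter workflow documented above; hedged, as the
two file paths are illustrative stand-ins for pgen's C output:

# Sketch: load classic pgen C output into Python grammar tables.
from lib2to3.pgen2.conv import Converter

c = Converter()
c.run("graminit.h", "graminit.c")  # assumed paths to pgen-generated C files
c.report()                         # inherited from grammar.Grammar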
parse.pyc000064400000016104150471355400006401 0ustar00
[compiled CPython 2.7 bytecode for lib2to3/pgen2/parse.py, whose full
 source text appears earlier in this archive; binary payload and duplicated
 docstrings omitted.  Recoverable symbols: ParseError; Parser (setup,
 addtoken, classify, shift, push, pop).]
__init__.pyo000064400000000256150471355400007043 0ustar00
[compiled CPython 2.7 bytecode for lib2to3/pgen2/__init__.py; binary payload
 omitted.  Recoverable docstring: "The pgen2 package."]
tokenize.pyc000064400000041113150471355400007115 0ustar00
[compiled CPython 2.7 bytecode for lib2to3/pgen2/tokenize.py; binary payload
 omitted.  The module docstring below is recovered intact from the bytecode.]

    Tokenization help for Python programs.

    generate_tokens(readline) is a generator that breaks a stream of
    text into Python tokens.  It accepts a readline-like method which is called
    repeatedly to get the next line of input (or "" for EOF).  It generates
    5-tuples with these members:

        the token type (see token.py)
        the token (a string)
        the starting (row, column) indices of the token (a 2-tuple of ints)
        the ending (row, column) indices of the token (a 2-tuple of ints)
        the original line (string)

    It is designed to match the working of the Python tokenizer exactly, except
    that it produces COMMENT tokens for comments and gives type OP for all
    operators

    Older entry points
        tokenize_loop(readline, tokeneater)
        tokenize(readline, tokeneater=printtoken)
    are the same, except instead of generating tokens, tokeneater is a callback
    function to which the 5 fields described above are passed as 5 arguments,
    each time a new token is found.

Other recoverable documentation: detect_encoding(readline) detects the
encoding of a Python source file from a utf-8 BOM or a pep-0263 coding
cookie, calling readline at most twice and defaulting to 'utf-8';
untokenize(iterable) transforms tokens back into Python source code,
round-tripping full input exactly.
driver.pyc000064400000014477150471355400006565 0ustar00
[compiled CPython 2.7 bytecode for lib2to3/pgen2/driver.py; binary payload
 omitted.  Recoverable docstring: "Parser driver.  This provides a
 high-level interface to parse a file into a syntax tree."  Recoverable
 symbols: Driver (parse_tokens, parse_stream_raw, parse_stream, parse_file,
 parse_string), load_grammar, load_packaged_grammar, and a main() that
 produces grammar pickle files when run as a script.]
tokenize.pyo000064400000040762150471355400007132 0ustar00
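A minimal usage sketch tying together the tokenize and driver modules
recovered above; hedged, as the grammar path and input string are
assumptions for illustration (Python 2.7):

# Sketch: tokenize a string, then parse it into a syntax tree.
import StringIO
from lib2to3.pgen2 import driver, tokenize

source = "x = 1\n"  # illustrative input

# generate_tokens yields (type, string, (srow, scol), (erow, ecol), line).
for tok in tokenize.generate_tokens(StringIO.StringIO(source).readline):
    print tok

# High-level path: load (or regenerate) grammar tables, then parse.
g = driver.load_grammar("Grammar.txt")  # illustrative grammar path
d = driver.Driver(g, convert=None)
tree = d.parse_string(source)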
[compiled CPython 2.7 bytecode for lib2to3/pgen2/tokenize.py, optimized
 (.pyo) duplicate of the tokenize.pyc entry above; binary payload and
 duplicated docstrings omitted.]
token.pyo000064400000004377150471355410006425 0ustar00
[compiled CPython 2.7 bytecode for lib2to3/pgen2/token.py; binary payload
 omitted.  Recoverable docstring: 'Token constants (from "token.h").'
 Recoverable contents: the numeric token constants (ENDMARKER, NAME,
 NUMBER, STRING, NEWLINE, INDENT, DEDENT, the operator tokens, OP, COMMENT,
 NL, RARROW, ERRORTOKEN, N_TOKENS, NT_OFFSET), the tok_name reverse
 mapping, and the helpers ISTERMINAL(x), ISNONTERMINAL(x) and ISEOF(x).]
token.pyc000064400000004377150471355410006411 0ustar00
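A small sketch of the token helpers recovered above; hedged: ISTERMINAL and
ISNONTERMINAL test a code against the NT_OFFSET boundary of 256
(Python 2.7):

# Sketch: terminal token codes vs. nonterminal symbol numbers.
from lib2to3.pgen2 import token

print token.tok_name[token.NAME]    # -> 'NAME'
print token.ISTERMINAL(token.NAME)  # True: token codes sit below NT_OFFSET
print token.ISNONTERMINAL(256)      # True: symbol numbers start at 256
print token.ISEOF(token.ENDMARKER)  # True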
[compiled CPython 2.7 bytecode for lib2to3/pgen2/token.py, unoptimized
 (.pyc) duplicate of the entry above; binary payload omitted.]
literals.pyo000064400000003372150471355410007122 0ustar00
[compiled CPython 2.7 bytecode for lib2to3/pgen2/literals.py; binary payload
 omitted.  Recoverable docstring: "Safely evaluate Python string literals
 without using eval()."  Recoverable symbols: the simple_escapes table
 (a, b, f, n, r, t, v, ', ", \\), escape(m), evalString(s), and test().]
conv.pyc000064400000017775150471355410006244 0ustar00
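A quick sketch of evalString from the literals module above; hedged: it is
passed the quoted literal text, quotes included (Python 2.7):

# Sketch: decode a Python string literal without using eval().
from lib2to3.pgen2.literals import evalString

print repr(evalString("'hello\\nworld'"))  # -> 'hello\nworld'
print repr(evalString('"\\x41\\101"'))     # hex and octal escapes -> 'AA'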
[compiled CPython 2.7 bytecode for lib2to3/pgen2/conv.py, unoptimized
 (.pyc) duplicate of the conv.pyo entry above (this variant retains its
 assert statements); binary payload and duplicated docstrings omitted.]
parse.pyo000064400000016035150471355410006415 0ustar00
[compiled CPython 2.7 bytecode for lib2to3/pgen2/parse.py, optimized (.pyo)
 duplicate of the parse.pyc entry above; binary payload omitted.]
grammar.pyo000064400000017004150471355410006726 0ustar00
{fc@s�dZddlZddlZddlmZmZdefd��YZd�ZdZ	iZ
xBe	j�D]4Zerlej
�\ZZeee�e
e<qlqlWdS(	s�This module defines the data structures used to represent a grammar.

These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.

There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.

i����Ni(ttokenttokenizetGrammarcBsDeZdZd�Zd�Zd�Zd�Zd�Zd�ZRS(s�	Pgen parsing tables conversion class.

    Once initialized, this class supplies the grammar tables for the
    parsing engine implemented by parse.py.  The parsing engine
    accesses the instance variables directly.  The class here does not
    provide initialization of the tables; several subclasses exist to
    do this (see the conv and pgen modules).

    The load() method reads the tables from a pickle file, which is
    much faster than the other ways offered by subclasses.  The pickle
    file is written by calling dump() (after loading the grammar
    tables using a subclass).  The report() method prints a readable
    representation of the tables to stdout, for debugging.

    The instance variables are as follows:

    symbol2number -- a dict mapping symbol names to numbers.  Symbol
                     numbers are always 256 or higher, to distinguish
                     them from token numbers, which are between 0 and
                     255 (inclusive).

    number2symbol -- a dict mapping numbers to symbol names;
                     these two are each other's inverse.

    states        -- a list of DFAs, where each DFA is a list of
                     states, each state is a list of arcs, and each
                     arc is a (i, j) pair where i is a label and j is
                     a state number.  The DFA number is the index into
                     this list.  (This name is slightly confusing.)
                     Final states are represented by a special arc of
                     the form (0, j) where j is its own state number.

    dfas          -- a dict mapping symbol numbers to (DFA, first)
                     pairs, where DFA is an item from the states list
                     above, and first is a set of tokens that can
                     begin this grammar rule (represented by a dict
                     whose values are always 1).

    labels        -- a list of (x, y) pairs where x is either a token
                     number or a symbol number, and y is either None
                     or a string; the strings are keywords.  The label
                     number is the index in this list; label numbers
                     are used to mark state transitions (arcs) in the
                     DFAs.

    start         -- the number of the grammar's start symbol.

    keywords      -- a dict mapping keyword strings to arc labels.

    tokens        -- a dict mapping token numbers to arc labels.

    cCsXi|_i|_g|_i|_dg|_i|_i|_i|_d|_dS(NitEMPTYi(iR(	t
symbol2numbert
number2symboltstatestdfastlabelstkeywordsttokenstsymbol2labeltstart(tself((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pyt__init__Ms							cCs>t|d��)}t|j�}tj||d�WdQXdS(s�Dump the grammar tables to a pickle file.

        dump() recursively changes all dict to OrderedDict, so the pickled file
        is not exactly the same as what was passed in to dump(). load() uses the
        pickled file to create the tables, but  only changes OrderedDict to dict
        at the top level; it does not recursively change OrderedDict to dict.
        So, the loaded tables are different from the original tables that were
        passed to load() in that some of the OrderedDict (from the pickled file)
        are not changed back to dict. For parsing, this has no effect on
        performance because OrderedDict uses dict's __getitem__ with nothing in
        between.
        """
        with open(filename, "wb") as f:
            d = _make_deterministic(self.__dict__)
            pickle.dump(d, f, 2)

    def load(self, filename):
        """Load the grammar tables from a pickle file."""
        f = open(filename, "rb")
        d = pickle.load(f)
        f.close()
        self.__dict__.update(d)

    def loads(self, pkl):
        """Load the grammar tables from a pickle bytes object."""
        self.__dict__.update(pickle.loads(pkl))

    def copy(self):
        """
        Copy the grammar.
        """
        new = self.__class__()
        for dict_attr in ("symbol2number", "number2symbol", "dfas",
                          "keywords", "tokens", "symbol2label"):
            setattr(new, dict_attr, getattr(self, dict_attr).copy())
        new.labels = self.labels[:]
        new.states = self.states[:]
        new.start = self.start
        return new

    def report(self):
        """Dump the grammar tables to standard output, for debugging."""
        from pprint import pprint
        print "s2n"
        pprint(self.symbol2number)
        print "n2s"
        pprint(self.number2symbol)
        print "states"
        pprint(self.states)
        print "dfas"
        pprint(self.dfas)
        print "labels"
        pprint(self.labels)
        print "start", self.start


def _make_deterministic(top):
    if isinstance(top, dict):
        return collections.OrderedDict(
            sorted(((k, _make_deterministic(v)) for k, v in top.iteritems())))
    if isinstance(top, list):
        return [_make_deterministic(e) for e in top]
    if isinstance(top, tuple):
        return tuple(_make_deterministic(e) for e in top)
    return top


# Map from operator to number (since tokenize doesn't do this)

opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
@= ATEQUAL
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
"""

opmap = {}
for line in opmap_raw.splitlines():
    if line:
        op, name = line.split()
        opmap[op] = getattr(token, name)
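# A minimal round trip through the tables above -- a sketch assuming
# lib2to3 is importable; it is not part of the original module, and
# "Grammar.txt"/"Grammar.pickle" are hypothetical paths:
#
#   from lib2to3.pgen2 import grammar, token, driver
#   g = driver.load_grammar("Grammar.txt")
#   g.dump("Grammar.pickle")                    # pickle the tables
#   g2 = grammar.Grammar()
#   g2.load("Grammar.pickle")                   # fast reload
#   assert grammar.opmap["->"] == token.RARROW  # operator table lookup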
sz	=literals.pyc000064400000003742150471355410007113 0ustar00�
import re

simple_escapes = {"a": "\a", "b": "\b", "f": "\f", "n": "\n", "r": "\r",
                  "t": "\t", "v": "\v", "'": "'", '"': '"', "\\": "\\"}

def escape(m):
    all, tail = m.group(0, 1)
    assert all.startswith("\\")
    esc = simple_escapes.get(tail)
    if esc is not None:
        return esc
    if tail.startswith("x"):
        hexes = tail[1:]
        if len(hexes) < 2:
            raise ValueError("invalid hex string escape ('\\%s')" % tail)
        i = int(hexes, 16)
    else:
        try:
            i = int(tail, 8)
        except ValueError:
            raise ValueError("invalid octal string escape ('\\%s')" % tail)
    return chr(i)

def evalString(s):
    q = s[0]
    if s[:3] == q*3:
        q = q*3
    assert s.endswith(q), repr(s[-len(q):])
    s = s[len(q):-len(q)]
    return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s)

def test():
    for i in range(256):
        c = chr(i)
        s = repr(c)
        e = evalString(s)
        if e != c:
            print i, c, s, e

if __name__ == "__main__":
    test()
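# Example round trip -- a sketch assuming lib2to3 is importable; it is
# not part of the original module:
#
#   from lib2to3.pgen2.literals import evalString
#   assert evalString("'a\\x41\\101'") == "aAA"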

		
__init__.pyc000064400000000256150471355410007030 0ustar00
driver.pyo000064400000014400150471355410006574 0ustar00"""Parser driver.

This provides a high-level interface to parse a file into a syntax tree.

"""

__author__ = "Guido van Rossum <guido@python.org>"

__all__ = ["Driver", "load_grammar"]

import codecs
import logging
import os
import pkgutil
import StringIO
import sys

from . import grammar, parse, token, tokenize, pgen


class Driver(object):

    def __init__(self, grammar, convert=None, logger=None):
        self.grammar = grammar
        if logger is None:
            logger = logging.getLogger()
        self.logger = logger
        self.convert = convert

    def parse_tokens(self, tokens, debug=False):
        """Parse a series of tokens and return the syntax tree."""

    def parse_stream_raw(self, stream, debug=False):
        """Parse a stream and return the syntax tree."""
        tokens = tokenize.generate_tokens(stream.readline)
        return self.parse_tokens(tokens, debug)

    def parse_stream(self, stream, debug=False):
        """Parse a stream and return the syntax tree."""
        return self.parse_stream_raw(stream, debug)

    def parse_file(self, filename, encoding=None, debug=False):
        """Parse a file and return the syntax tree."""
        stream = codecs.open(filename, "r", encoding)
        try:
            return self.parse_stream(stream, debug)
        finally:
            stream.close()

    def parse_string(self, text, debug=False):
        """Parse a string and return the syntax tree."""
        tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
        return self.parse_tokens(tokens, debug)


def _generate_pickle_name(gt):
    head, tail = os.path.splitext(gt)
    if tail == ".txt":
        tail = ""
    return head + tail + ".".join(map(str, sys.version_info)) + ".pickle"


def load_grammar(gt="Grammar.txt", gp=None, save=True, force=False,
                 logger=None):
    """Load the grammar (maybe from a pickle)."""
    if logger is None:
        logger = logging.getLogger()
    gp = _generate_pickle_name(gt) if gp is None else gp
    if force or not _newer(gp, gt):
        logger.info("Generating grammar tables from %s", gt)
        g = pgen.generate_grammar(gt)
        if save:
            logger.info("Writing grammar tables to %s", gp)
            try:
                g.dump(gp)
            except IOError as e:
                logger.info("Writing failed: %s", e)
    else:
        g = grammar.Grammar()
        g.load(gp)
    return g


def _newer(a, b):
    """Inquire whether file a was written since file b."""
    if not os.path.exists(a):
        return False
    if not os.path.exists(b):
        return True
    return os.path.getmtime(a) >= os.path.getmtime(b)


def load_packaged_grammar(package, grammar_source):
    """Normally, loads a pickled grammar by doing
        pkgutil.get_data(package, pickled_grammar)
    where *pickled_grammar* is computed from *grammar_source* by adding the
    Python version and using a ``.pickle`` extension.

    However, if *grammar_source* is an extant file, load_grammar(grammar_source)
    is called instead. This facilitates using a packaged grammar file when needed
    but preserves load_grammar's automatic regeneration behavior when possible.

    """
    if os.path.isfile(grammar_source):
        return load_grammar(grammar_source)
    pickled_name = _generate_pickle_name(os.path.basename(grammar_source))
    data = pkgutil.get_data(package, pickled_name)
    g = grammar.Grammar()
    g.loads(data)
    return g


def main(*args):
    """Main program, when run as a script: produce grammar pickle files.

    Calls load_grammar for each argument, a path to a grammar text file.
    """
    if not args:
        args = sys.argv[1:]
    logging.basicConfig(level=logging.INFO, stream=sys.stdout,
                        format='%(message)s')
    for gt in args:
        load_grammar(gt, save=True, force=True)
    return True


if __name__ == "__main__":
    sys.exit(int(not main()))
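# Typical use of the driver -- a sketch assuming lib2to3 and its bundled
# Python grammar are available; it is not part of the original module:
#
#   from lib2to3 import pygram, pytree
#   from lib2to3.pgen2 import driver
#
#   d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
#   tree = d.parse_string("x = 1\n")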
grammar.pyc000064400000017004150471355410006716 0ustar00
__pycache__/pgen.cpython-38.opt-1.pyc000064400000022161150521473260013303 0ustar00

from . import grammar, token, tokenize


class PgenGrammar(grammar.Grammar):
    pass


class ParserGenerator(object):
    # Converts a pgen grammar file into the DFA-based parse tables used
    # by parse.py; the class also defines make_grammar, make_first,
    # make_label, addfirstsets, calcfirst, parse, make_dfa, dump_nfa,
    # dump_dfa, simplify_dfa, parse_rhs, parse_alt, parse_item,
    # parse_atom, expect, gettoken and raise_error.

    def __init__(self, filename, stream=None):
        close_stream = None
        if stream is None:
            stream = open(filename)
            close_stream = stream.close
        self.filename = filename
        self.stream = stream
        self.generator = tokenize.generate_tokens(stream.readline)
        self.gettoken()  # Initialize lookahead
        self.dfas, self.startsymbol = self.parse()
        if close_stream is not None:
            close_stream()
        self.first = {}  # map from symbol name to set of tokens
        self.addfirstsets()


class NFAState(object):

    def __init__(self):
        self.arcs = []  # list of (label, NFAState) pairs


class DFAState(object):

    def __init__(self, nfaset, final):
        self.nfaset = nfaset
        self.isfinal = final in nfaset
        self.arcs = {}  # map from label to DFAState


def generate_grammar(filename="Grammar.txt"):
    p = ParserGenerator(filename)
    return p.make_grammar()
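# Building parse tables from a grammar file -- a sketch assuming lib2to3
# is importable; it is not part of the original module, and the file
# names are hypothetical:
#
#   from lib2to3.pgen2 import pgen
#   g = pgen.generate_grammar("Grammar.txt")
#   g.dump("Grammar.pickle")                   # cache the tables for reuse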
__pycache__/pgen.cpython-38.pyc000064400000023062150521473260012345 0ustar00
__pycache__/token.cpython-38.opt-1.pyc000064400000003541150521473260013473 0ustar00

"""Token constants (from "token.h")."""

ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
# Codes 7 through 59 name the punctuation and operator tokens listed in
# grammar.py's opmap table above (LPAR .. COLONEQUAL), plus OP, COMMENT,
# NL, RARROW, AWAIT, ASYNC and ERRORTOKEN.
N_TOKENS = 60
NT_OFFSET = 256

tok_name = {}
for _name, _value in list(globals().items()):
    if type(_value) is type(0):
        tok_name[_value] = _name


def ISTERMINAL(x):
    return x < NT_OFFSET


def ISNONTERMINAL(x):
    return x >= NT_OFFSET


def ISEOF(x):
    return x == ENDMARKER
ERRORTOKEN�
COLONEQUAL�N_TOKENSrA�tok_name�list�globals�items�_nameZ_value�typerFrGrIrDrDrDrE�<module>s�__pycache__/token.cpython-38.opt-2.pyc000064400000003457150521473260013502 0ustar00U

e5d�@sLdZdZdZdZdZdZdZdZdZd	Z	d
Z
dZdZd
Z
dZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZdZd Z d!Z!d"Z"d#Z#d$Z$d%Z%d&Z&d'Z'd(Z(d)Z)d*Z*d+Z+d,Z,d-Z-d.Z.d/Z/d0Z0d1Z1d2Z2d3Z3d4Z4d5Z5d6Z6d7Z7d8Z8d9Z9d:Z:d;Z;d<Z<d=Z=iZ>e?e@��A��D]$\ZBZCeDeC�eDd�k�r
eBe>eC<�q
d>d?�ZEd@dA�ZFdBdC�ZGdDS)E����������	�
���
������������������� �!�"�#�$�%�&�'�(�)�*�+�,�-�.�/�0�1�2�3�4�5�6�7�8�9�:�;�<�cCs|tkS�N��	NT_OFFSET��x�rD�+/usr/lib64/python3.8/lib2to3/pgen2/token.py�
ISTERMINALOsrFcCs|tkSr?r@rBrDrDrE�
ISNONTERMINALRsrGcCs|tkSr?)�	ENDMARKERrBrDrDrE�ISEOFUsrIN)HrH�NAME�NUMBER�STRING�NEWLINE�INDENT�DEDENT�LPAR�RPAR�LSQB�RSQB�COLON�COMMA�SEMI�PLUS�MINUS�STAR�SLASH�VBAR�AMPER�LESS�GREATER�EQUAL�DOT�PERCENTZ	BACKQUOTE�LBRACE�RBRACE�EQEQUAL�NOTEQUAL�	LESSEQUAL�GREATEREQUAL�TILDE�
CIRCUMFLEX�	LEFTSHIFT�
RIGHTSHIFT�
DOUBLESTAR�	PLUSEQUAL�MINEQUAL�	STAREQUAL�
SLASHEQUAL�PERCENTEQUAL�
AMPEREQUAL�	VBAREQUAL�CIRCUMFLEXEQUAL�LEFTSHIFTEQUAL�RIGHTSHIFTEQUAL�DOUBLESTAREQUAL�DOUBLESLASH�DOUBLESLASHEQUAL�AT�ATEQUAL�OP�COMMENT�NL�RARROW�AWAIT�ASYNC�
ERRORTOKEN�
COLONEQUAL�N_TOKENSrA�tok_name�list�globals�items�_nameZ_value�typerFrGrIrDrDrDrE�<module>	s�__pycache__/parse.cpython-38.pyc000064400000014544150521473260012533 0ustar00U

e5d��@s4dZddlmZGdd�de�ZGdd�de�ZdS)z�Parser engine for the grammar tables generated by pgen.

The grammar table must be loaded first.

See Parser/parser.c in the Python distribution for additional info on
how this parsing engine works.

�)�tokenc@s eZdZdZdd�Zdd�ZdS)�
ParseErrorz(Exception to signal the parser is stuck.cCs4t�|d||||f�||_||_||_||_dS)Nz!%s: type=%r, value=%r, context=%r)�	Exception�__init__�msg�type�value�context)�selfrrrr	�r�+/usr/lib64/python3.8/lib2to3/pgen2/parse.pyrs
�zParseError.__init__cCst|�|j|j|j|jffS�N)rrrr	)r
rrr�
__reduce__szParseError.__reduce__N)�__name__�
__module__�__qualname__�__doc__rrrrrrrsrc@sLeZdZdZddd�Zddd�Zdd�Zd	d
�Zdd�Zd
d�Z	dd�Z
dS)�Parsera5Parser engine.

    The proper usage sequence is:

    p = Parser(grammar, [converter])  # create instance
    p.setup([start])                  # prepare for parsing
    <for each input token>:
        if p.addtoken(...):           # parse a token; may raise ParseError
            break
    root = p.rootnode                 # root of abstract syntax tree

    A Parser instance may be reused by calling setup() repeatedly.

    A Parser instance contains state pertaining to the current token
    sequence, and should not be used concurrently by different threads
    to parse separate token sequences.

    See driver.py for how to get input tokens by tokenizing a file or
    string.

    Parsing is complete when addtoken() returns True; the root of the
    abstract syntax tree can then be retrieved from the rootnode
    instance variable.  When a syntax error occurs, addtoken() raises
    the ParseError exception.  There is no error recovery; the parser
    cannot be used after a syntax error was reported (but it can be
    reinitialized by calling setup()).

    NcCs||_|pdd�|_dS)a�Constructor.

        The grammar argument is a grammar.Grammar instance; see the
        grammar module for more information.

        The parser is not ready yet for parsing; you must call the
        setup() method to get it started.

        The optional convert argument is a function mapping concrete
        syntax tree nodes to abstract syntax tree nodes.  If not
        given, no conversion is done and the syntax tree produced is
        the concrete syntax tree.  If given, it must be a function of
        two arguments, the first being the grammar (a grammar.Grammar
        instance), and the second being the concrete syntax tree node
        to be converted.  The syntax tree is converted from the bottom
        up.

        A concrete syntax tree node is a (type, value, context, nodes)
        tuple, where type is the node type (a token or symbol number),
        value is None for symbols and a string for tokens, context is
        None or an opaque value used for error reporting (typically a
        (lineno, offset) pair), and nodes is a list of children for
        symbols, and None for tokens.

        An abstract syntax tree node may be anything; this is entirely
        up to the converter function.

        cSs|Sr
r)�grammar�noderrr�<lambda>Z�z!Parser.__init__.<locals>.<lambda>N)r�convert)r
rrrrrr<szParser.__init__cCsH|dkr|jj}|ddgf}|jj|d|f}|g|_d|_t�|_dS)a�Prepare for parsing.

        This *must* be called before starting to parse.

        The optional argument is an alternative start symbol; it
        defaults to the grammar's start symbol.

        You can use a Parser instance to parse any number of programs;
        each time you call setup() the parser is reset to an initial
        state determined by the (implicit or explicit) start symbol.

        N�)r�start�dfas�stack�rootnode�set�
used_names)r
r�newnodeZ
stackentryrrr�setup\s
zParser.setupcCs<|�|||�}|jd\}}}|\}}	||}
|
D]�\}}|jj|\}
}||kr�|
dks^t�|�||||�|}||d|fgkr�|��|js�dS|jd\}}}|\}}	qrdS|
dkr2|jj|
}|\}}||kr2|�|
|jj|
||�qq2d|f|
k�r(|��|j�s6t	d|||��qt	d|||��qdS)	z<Add a token; return True iff this is the end of the program.����rTFztoo much inputz	bad inputN)
�classifyrr�labels�AssertionError�shift�popr�pushr)r
rrr	�ilabel�dfa�staterZstates�firstZarcs�i�newstate�t�vZitsdfaZ	itsstatesZitsfirstrrr�addtokents@
�zParser.addtokencCsX|tjkr0|j�|�|jj�|�}|dk	r0|S|jj�|�}|dkrTtd|||��|S)z&Turn a token into a label.  (Internal)Nz	bad token)	r�NAMEr�addr�keywords�get�tokensr)r
rrr	r*rrrr$�s
zParser.classifyc	CsT|jd\}}}|||df}|�|j|�}|dk	r@|d�|�|||f|jd<dS)zShift a token.  (Internal)r"N)rrr�append)	r
rrr/r	r+r,rr rrrr'�szParser.shiftc	CsB|jd\}}}|d|gf}|||f|jd<|j�|d|f�dS)zPush a nonterminal.  (Internal)r"Nr)rr8)	r
rZnewdfar/r	r+r,rr rrrr)�szParser.pushcCs`|j��\}}}|�|j|�}|dk	r\|jrL|jd\}}}|d�|�n||_|j|j_dS)zPop a nonterminal.  (Internal)Nr")rr(rrr8rr)r
ZpopdfaZpopstateZpopnoder r+r,rrrrr(�sz
Parser.pop)N)N)rrrrrr!r2r$r'r)r(rrrrrs
 
0	rN)r�rrr�objectrrrrr�<module>s
__pycache__/tokenize.cpython-38.pyc000064400000035652150521473260013254 0ustar00U

"""Tokenization help for Python programs.

generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens.  It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF).  It generates
5-tuples with these members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators

Older entry points
    tokenize_loop(readline, tokeneater)
    tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'

import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *

from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
           "generate_tokens", "untokenize"]
del token

try:
    bytes
except NameError:
    # Support bytes type in Python <= 2.5, so 2to3 turns itself into
    # valid Python 3 code.
    bytes = str


class TokenError(Exception):
    pass


class StopTokenizing(Exception):
    pass


def printtoken(type, token, start, end, line):  # for testing
    (srow, scol) = start
    (erow, ecol) = end
    print("%d,%d-%d,%d:\t%s\t%s" %
          (srow, scol, erow, ecol, tok_name[type], repr(token)))


def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        pass


def tokenize_loop(readline, tokeneater):
    for token_info in generate_tokens(readline):
        tokeneater(*token_info)


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    ...


def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read
    in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present, but
    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
    charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    ...


def untokenize(iterable):
    """Transform tokens back into Python source code.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
    """
    ...


def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile).next    # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    physical line.
    """
    ...
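# Example usage -- a sketch assuming lib2to3 is importable; it is not
# part of the original module, and "example.py" is a hypothetical file:
#
#   import io
#   from lib2to3.pgen2 import tokenize
#
#   for tok in tokenize.generate_tokens(io.StringIO("x = 1\n").readline):
#       print(tok[0], repr(tok[1]))
#
#   with open("example.py", "rb") as f:
#       encoding, lines = tokenize.detect_encoding(f.readline)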
__pycache__/tokenize.cpython-38.opt-1.pyc000064400000035544150521473260014213 0ustar00U

__pycache__/__init__.cpython-38.opt-1.pyc000064400000000247150521473260014112 0ustar00U

e5d��@sdZdS)zThe pgen2 package.N)�__doc__�rr�./usr/lib64/python3.8/lib2to3/pgen2/__init__.py�<module>�__pycache__/tokenize.cpython-38.opt-2.pyc000064400000026056150521473260014212 0ustar00U

e5d?R�
@s�dZdZddlZddlZddlmZmZddlTddlm	Z	dd	�e
e	�D�d
ddgZ[	zeWne
k
rzeZYnXd
d�Zdd�Zdd�Zdd�ZdZdZeede�ee�ZdZdZdZdZedd�Zeeeee�ZdZedd �ee�Zd!eZeee�Zed"ed#�Z ee ee�Z!d$Z"d%Z#d&Z$d'Z%d(Z&ee&d)e&d*�Z'ee&d+e&d,�Z(ed-d.d/d0d1d2d3d4d5�	Z)d6Z*ed7d8d9�Z+ee)e*e+�Z,ee!e,e(e�Z-ee-Z.ee&d:ed;d�e&d<ed=d��Z/edee'�Z0eee0e!e,e/e�Z1e2ej3e.e1e$e%f�\Z4Z5Z6Z7ed>d?d@dA�ed>d?dBdC�BdDdEdFdGdHdIhBZ8e�3e"�e�3e#�e6e7dJ�dKdL�e8D�dMdL�e8D�dNdL�e8D��Z9d)d*hdOdP�e8D�BdQdP�e8D�BZ:d;d=hdRdP�e8D�BdSdP�e8D�BZ;dTZ<GdUdV�dVe=�Z>GdWdX�dXe=�Z?dYdZ�Z@e@fd[d
�ZAd\d]�ZBGd^d_�d_�ZCe�3d`ejD�ZEe�3daejD�ZFdbdc�ZGddde�ZHdfd�ZIdgd�ZJeKdhk�r�ddlLZLeMeLjN�dk�r�eAeOeLjNd�jP�neAeLjQjP�dS)izKa-Ping Yee <ping@lfw.org>z@GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro�N)�BOM_UTF8�lookup)�*�)�tokencCsg|]}|ddkr|�qS)r�_�)�.0�xrr�./usr/lib64/python3.8/lib2to3/pgen2/tokenize.py�
<listcomp>%sr�tokenize�generate_tokens�
untokenizecGsdd�|�dS)N�(�|�))�join��choicesrrr�group0�rcGst|�dS)Nr�rrrrr�any1rrcGst|�dS)N�?rrrrr�maybe2rrcst�fdd��D��S)Nc3s4|],}�dD]}|��|��kr||VqqdS))�N)�casefold)r	r
�y��lrr�	<genexpr>4s

z _combinations.<locals>.<genexpr>)�setrrrr�
[bytecode: remainder of the compiled tokenize module (its tar header precedes this section) -- the token regular expressions (Whitespace, Comment, Ignore, Name, the binary/hex/octal/decimal number patterns, Single/Double/Triple string patterns, Operator, Bracket, PseudoToken), the single- and triple-quoted prefix tables, the TokenError and StopTokenizing exceptions, printtoken(), tokenize(), tokenize_loop(), the Untokenizer class, the coding-cookie regexes, _get_normal_name(), detect_encoding(), untokenize() and the generate_tokens() generator; binary data not representable as text]
__pycache__/grammar.cpython-38.pyc000064400000013043150521473260013040 0ustar00U

[bytecode: module header; the module docstring reads:]
This module defines the data structures used to represent a grammar.

These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.

There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.

[bytecode: imports (pickle, and the sibling token module); class Grammar(object) follows, with this docstring:]
    Pgen parsing tables conversion class.

    Once initialized, this class supplies the grammar tables for the
    parsing engine implemented by parse.py.  The parsing engine
    accesses the instance variables directly.  The class here does not
    provide initialization of the tables; several subclasses exist to
    do this (see the conv and pgen modules).

    The load() method reads the tables from a pickle file, which is
    much faster than the other ways offered by subclasses.  The pickle
    file is written by calling dump() (after loading the grammar
    tables using a subclass).  The report() method prints a readable
    representation of the tables to stdout, for debugging.

    The instance variables are as follows:

    symbol2number -- a dict mapping symbol names to numbers.  Symbol
                     numbers are always 256 or higher, to distinguish
                     them from token numbers, which are between 0 and
                     255 (inclusive).

    number2symbol -- a dict mapping numbers to symbol names;
                     these two are each other's inverse.

    states        -- a list of DFAs, where each DFA is a list of
                     states, each state is a list of arcs, and each
                     arc is a (i, j) pair where i is a label and j is
                     a state number.  The DFA number is the index into
                     this list.  (This name is slightly confusing.)
                     Final states are represented by a special arc of
                     the form (0, j) where j is its own state number.

    dfas          -- a dict mapping symbol numbers to (DFA, first)
                     pairs, where DFA is an item from the states list
                     above, and first is a set of tokens that can
                     begin this grammar rule (represented by a dict
                     whose values are always 1).

    labels        -- a list of (x, y) pairs where x is either a token
                     number or a symbol number, and y is either None
                     or a string; the strings are keywords.  The label
                     number is the index in this list; label numbers
                     are used to mark state transitions (arcs) in the
                     DFAs.

    start         -- the number of the grammar's start symbol.

    keywords      -- a dict mapping keyword strings to arc labels.

    tokens        -- a dict mapping token numbers to arc labels.

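A small sketch of these tables in practice (illustrative, not part of the archive; lib2to3.pygram ships a ready-made Grammar instance loaded from the pickled tables):

# Peek at a loaded Grammar: symbol numbers start at 256, and dfas maps
# each symbol number to its (DFA, first-set) pair as described above.
from lib2to3 import pygram

g = pygram.python_grammar
print(min(g.symbol2number.values()))       # 256: symbols sit above token codes
dfa, first = g.dfas[g.start]               # tables for the start symbol
print(g.number2symbol[g.start], len(dfa), len(first))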
[bytecode: Grammar methods -- __init__(); dump() "Dump the grammar tables to a pickle file."; load() "Load the grammar tables from a pickle file."; loads() "Load the grammar tables from a pickle bytes object."; copy() "Copy the grammar."; report() "Dump the grammar tables to standard output, for debugging."; the raw operator table opmap_raw follows:]
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
@= ATEQUAL
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
:= COLONEQUAL
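The trailing bytecode below rebuilds the module-level opmap dict from this raw table (the splitlines/split/getattr names are visible in it). A runnable reconstruction, assuming an installed lib2to3:

# Build opmap: operator string -> token number, one "OP NAME" line of the
# raw table at a time.
from lib2to3.pgen2 import token
from lib2to3.pgen2.grammar import opmap_raw

opmap = {}
for line in opmap_raw.splitlines():
    if line:
        op, name = line.split()
        opmap[op] = getattr(token, name)

print(opmap["->"] == token.RARROW)  # True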
[bytecode: builds the module-level opmap dict from opmap_raw]
__pycache__/__init__.cpython-38.opt-2.pyc000064400000000213150521473260014104 0ustar00U

[bytecode: empty package __init__, docstring stripped in the opt-2 variant; source path /usr/lib64/python3.8/lib2to3/pgen2/__init__.py]
__pycache__/literals.cpython-38.opt-2.pyc000064400000002413150521473260014170 0ustar00U

[bytecode: literals module, opt-2 variant -- the simple_escapes table, escape(), evalString(), test() and the __main__ hook; docstrings stripped, see literals.cpython-38.pyc below; source path /usr/lib64/python3.8/lib2to3/pgen2/literals.py]
	__pycache__/driver.cpython-38.opt-1.pyc000064400000011747150521473260013655 0ustar00U

[bytecode: module header; imports io, os, logging, pkgutil, sys and the local grammar, parse, token, tokenize, pgen modules; the module docstring reads:]
Parser driver.

This provides a high-level interface to parse a file into a syntax tree.

[bytecode: __author__ = "Guido van Rossum <guido@python.org>"; __all__ = ["Driver", "load_grammar"]; class Driver with parse_tokens() "Parse a series of tokens and return the syntax tree.", parse_stream_raw() and parse_stream() "Parse a stream and return the syntax tree.", parse_file() "Parse a file and return the syntax tree.", parse_string() "Parse a string and return the syntax tree."; _generate_pickle_name(); load_grammar() "Load the grammar (maybe from a pickle)."; _newer() "Inquire whether file a was written since file b."; load_packaged_grammar(), whose docstring reads:]
Normally, loads a pickled grammar by doing
        pkgutil.get_data(package, pickled_grammar)
    where *pickled_grammar* is computed from *grammar_source* by adding the
    Python version and using a ``.pickle`` extension.

    However, if *grammar_source* is an extant file, load_grammar(grammar_source)
    is called instead. This facilitates using a packaged grammar file when needed
    but preserves load_grammar's automatic regeneration behavior when possible.

[bytecode: load_packaged_grammar body; main() "Main program, when run as a script: produce grammar pickle files.  Calls load_grammar for each argument, a path to a grammar text file."; __main__ hook]
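An end-to-end sketch of the driver above (illustrative, not part of the archive; pygram and pytree are sibling lib2to3 modules):

# Parse a string into a lib2to3 pytree using Driver and the packaged grammar.
from lib2to3 import pygram, pytree
from lib2to3.pgen2 import driver

d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
tree = d.parse_string("x = 1\n")
print(repr(str(tree)))  # str(tree) reproduces the source: 'x = 1\n'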
	
__pycache__/literals.cpython-38.pyc000064400000003024150521473260013227 0ustar00U

Safely evaluate Python string literals without using eval().

[bytecode: the simple_escapes table, escape(), evalString(), test() and the __main__ hook; this full variant keeps the assertions that the opt-2 copy above drops]
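A quick sketch of the module's main entry point (illustrative, not part of the archive):

# evalString() undoes one quoted literal, resolving backslash escapes via
# the simple_escapes table plus \xNN and octal forms.
from lib2to3.pgen2.literals import evalString

print(repr(evalString(r"'a\tb\x41\101'")))  # -> 'a\tbAA'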
	__pycache__/parse.cpython-38.opt-1.pyc000064400000014506150521473260013470 0ustar00U

[bytecode: compiled parse module -- the ParseError exception and the Parser engine; the module, class and constructor docstrings are identical to those in parse.py at the top of this archive, and the remaining method docstrings cover setup() "Prepare for parsing.", addtoken() "Add a token; return True iff this is the end of the program.", classify() "Turn a token into a label.  (Internal)", and the internal shift(), push() and pop() operations]
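A sketch of driving the raw Parser by hand (illustrative, not the archive's own code; driver.Driver wraps this loop, and the OP remapping uses the module-level opmap described in the grammar module's docstring earlier in this archive):

# Feed tokens to the Parser engine until addtoken() reports completion.
import io
from lib2to3 import pygram
from lib2to3.pgen2 import grammar, parse, token, tokenize

p = parse.Parser(pygram.python_grammar)
p.setup()
for t, value, start, end, line in tokenize.generate_tokens(
        io.StringIO("x = 1\n").readline):
    if t in (token.COMMENT, token.NL):
        continue                    # trivia never reaches the parser
    if t == token.OP:
        t = grammar.opmap[value]    # e.g. "=" -> token.EQUAL
    if p.addtoken(t, value, (None, start)):
        break                       # the grammar accepted ENDMARKER
print(p.rootnode)                   # concrete syntax tree (no converter)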
__pycache__/token.cpython-38.pyc000064400000003541150521473260012534 0ustar00U

Token constants (from "token.h").

[bytecode: numeric codes for ENDMARKER, NAME, NUMBER, STRING, NEWLINE, INDENT, DEDENT, the bracket and operator tokens, COMMENT, NL, RARROW, AWAIT, ASYNC, ERRORTOKEN, COLONEQUAL, N_TOKENS and NT_OFFSET; the tok_name reverse mapping built from the module globals; and the ISTERMINAL(), ISNONTERMINAL(), ISEOF() predicates; source path /usr/lib64/python3.8/lib2to3/pgen2/token.py]
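The three predicates compile down to simple comparisons against NT_OFFSET (a sketch, not part of the archive):

# Token codes sit below NT_OFFSET (256); grammar symbol numbers sit at or
# above it, and ENDMARKER (0) marks end of input.
from lib2to3.pgen2 import token

print(token.ISTERMINAL(token.NAME))      # True
print(token.ISNONTERMINAL(256))          # True
print(token.ISEOF(token.ENDMARKER))      # True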
__pycache__/conv.cpython-38.opt-2.pyc000064400000007112150521473260013317 0ustar00U

[bytecode: conv module, opt-2 variant -- class Converter(grammar.Grammar) with run(), parse_graminit_h(), parse_graminit_c() and finish_off(); docstrings stripped, see conv.cpython-38.pyc below; its constants include the regular expressions used to scan pgen's C output, e.g. r"^#define\s+(\w+)\s+(\d+)$" and r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$"; source path /usr/lib64/python3.8/lib2to3/pgen2/conv.py]
[tar entry: __pycache__/driver.cpython-38.opt-2.pyc -- bytecode for the driver module, docstrings stripped; same code as driver.cpython-38.opt-1.pyc above]
[tar entry: __pycache__/driver.cpython-38.pyc -- bytecode for the driver module; docstrings identical to the opt-1 variant above]
[tar entry: __pycache__/literals.cpython-38.opt-1.pyc -- bytecode for the literals module; docstring identical to literals.cpython-38.pyc above]
[tar entry: __pycache__/conv.cpython-38.pyc -- bytecode for the conv module; its docstrings read:]

Convert graminit.[ch] spit out by pgen to Python code.

Pgen is the Python parser generator.  It is useful to quickly create a
parser from a grammar file in Python's grammar notation.  But I don't
want my parsers to be written in C (yet), so I'm translating the
parsing tables to Python data structures and writing a Python parse
engine.

Note that the token numbers are constants determined by the standard
Python tokenizer.  The standard token module defines these numbers and
their names (the names are not used much).  The token numbers are
hardcoded into the Python tokenizer and into pgen.  A Python
implementation of the Python tokenizer is also available, in the
standard tokenize module.

On the other hand, symbol numbers (representing the grammar's
non-terminals) are assigned by pgen based on the actual grammar
input.

Note: this module is pretty much obsolete; the pgen module generates
equivalent grammar tables directly from the Grammar.txt input file
without having to invoke the Python pgen C program.

Converter -- "Grammar subclass that reads classic pgen output files.

    The run() method reads the tables as produced by the pgen parser
    generator, typically contained in two C files, graminit.h and
    graminit.c.  The other methods are for internal use only.

    See the base class for more documentation."

run() -- "Load the grammar tables from the text files written by pgen."

parse_graminit_h() -- "Parse the .h file written by pgen.  (Internal)

    This file is a sequence of #define statements defining the
    nonterminals of the grammar as numbers.  We build two tables
    mapping the numbers to names and back."

parse_graminit_c() -- "Parse the .c file written by pgen.  (Internal)

    The file looks as follows.  The first two lines are always this:

    #include "pgenheaders.h"
    #include "grammar.h"

    After that come four blocks:

    1) one or more state definitions
    2) a table defining dfas
    3) a table defining labels
    4) a struct defining the grammar

    A state definition has the following form:
    - one or more arc arrays, each of the form:
      static arc arcs_<n>_<m>[<k>] = {
              {<i>, <j>},
              ...
      };
    - followed by a state array, of the form:
      static state states_<s>[<t>] = {
              {<k>, arcs_<n>_<m>},
              ...
      };"

finish_off() -- "Create additional useful structures.  (Internal)"

[tar entry: __pycache__/pgen.cpython-38.opt-2.pyc -- bytecode for the pgen module, docstrings stripped: PgenGrammar, ParserGenerator (make_grammar, make_dfa, simplify_dfa, calcfirst/addfirstsets, parse_rhs/parse_alt/parse_item/parse_atom), NFAState, DFAState, and generate_grammar(filename="Grammar.txt")]
[tar entry: __pycache__/conv.cpython-38.opt-1.pyc -- duplicate of conv.cpython-38.pyc above with assertions removed; same docstrings]
[tar entry: __pycache__/parse.cpython-38.opt-2.pyc -- bytecode for the parse module, docstrings stripped; source is parse.py at the top of this archive]
[tar entry: __pycache__/grammar.cpython-38.opt-1.pyc -- duplicate of grammar.cpython-38.pyc above, including the same docstrings and opmap_raw table]
[tar entry: __pycache__/grammar.cpython-38.opt-2.pyc -- bytecode for the grammar module, docstrings stripped; carries the same opmap_raw table]
[tar entry: __pycache__/__init__.cpython-38.pyc -- bytecode for the package __init__; docstring: "The pgen2 package."]
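A closing sketch for the pgen entry summarized above (illustrative, not part of the archive; the Grammar.txt path is located via the sibling pygram module): generate_grammar() builds the same tables straight from a grammar text file, which is how driver.load_grammar() regenerates stale pickles.

# Regenerate parser tables from lib2to3's Grammar.txt without pgen's C
# program -- ParserGenerator does the NFA/DFA construction in pure Python.
import os
from lib2to3 import pygram
from lib2to3.pgen2 import pgen

src = os.path.join(os.path.dirname(pygram.__file__), "Grammar.txt")
g = pgen.generate_grammar(src)   # a PgenGrammar (grammar.Grammar subclass)
print(len(g.symbol2number), "symbols,", len(g.labels), "labels")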