Server: appserver-7f0f8755-nginx-15961cad18524ec5a9db05f2a6a7e440
Current directory: /usr/lib/python3.11
Software: nginx/1.27.5
File: tokenize.py
"""Tokenization help for Python programs. tokenize(readline) is a generator that breaks a stream of bytes into Python tokens. It decodes the bytes according to PEP-0263 for determining source file encoding. It accepts a readline-like method which is called repeatedly to get the next line of input (or b"" for EOF). It generates 5-tuples with these members: the token type (see token.py) the token (a string) the starting (row, column) indices of the token (a 2-tuple of ints) the ending (row, column) indices of the token (a 2-tuple of ints) the original line (string) It is designed to match the working of the Python tokenizer exactly, except that it produces COMMENT tokens for comments and gives type OP for all operators. Additionally, all token lists start with an ENCODING token which tells you which encoding was used to decode the bytes stream. """ __author__ = 'Ka-Ping Yee
' __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' 'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' 'Michael Foord') from builtins import open as _builtin_open from codecs import lookup, BOM_UTF8 import collections import functools from io import TextIOWrapper import itertools as _itertools import re import sys from token import * from token import EXACT_TOKEN_TYPES cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII) blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII) import token __all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding", "untokenize", "TokenInfo"] del token class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): def __repr__(self): annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % self._replace(type=annotated_type)) @property def exact_type(self): if self.type == OP and self.string in EXACT_TOKEN_TYPES: return EXACT_TOKEN_TYPES[self.string] else: return self.type def group(*choices): return '(' + '|'.join(choices) + ')' def any(*choices): return group(*choices) + '*' def maybe(*choices): return group(*choices) + '?' # Note: we use unicode matching for names ("\w") but ascii matching for # number literals. Whitespace = r'[ \f\t]*' Comment = r'#[^\r\n]*' Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) Name = r'\w+' Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+' Binnumber = r'0[bB](?:_?[01])+' Octnumber = r'0[oO](?:_?[0-7])+' Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)' Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*' Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?', r'\.[0-9](?:_?[0-9])*') + maybe(Exponent) Expfloat = r'[0-9](?:_?[0-9])*' + Exponent Floatnumber = group(Pointfloat, Expfloat) Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]') Number = group(Imagnumber, Floatnumber, Intnumber) # Return the empty string, plus all of the valid string prefixes. def _all_string_prefixes(): # The valid string prefixes. Only contain the lower case versions, # and don't contain any permutations (include 'fr', but not # 'rf'). The various permutations will be generated. _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr'] # if we add binary f-strings, add: ['fb', 'fbr'] result = {''} for prefix in _valid_string_prefixes: for t in _itertools.permutations(prefix): # create a list with upper and lower versions of each # character for u in _itertools.product(*[(c, c.upper()) for c in t]): result.add(''.join(u)) return result @functools.lru_cache def _compile(expr): return re.compile(expr, re.UNICODE) # Note that since _all_string_prefixes includes the empty string, # StringPrefix can be the empty string (making it optional). StringPrefix = group(*_all_string_prefixes()) # Tail end of ' string. Single = r"[^'\\]*(?:\\.[^'\\]*)*'" # Tail end of " string. Double = r'[^"\\]*(?:\\.[^"\\]*)*"' # Tail end of ''' string. Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" # Tail end of """ string. Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' Triple = group(StringPrefix + "'''", StringPrefix + '"""') # Single-line ' or " string. String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') # Sorting in reverse order puts the long operators before their prefixes. # Otherwise if = came before ==, == would get recognized as two instances # of =. 
Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True))) Funny = group(r'\r?\n', Special) PlainToken = group(Number, Funny, String, Name) Token = Ignore + PlainToken # First (or only) line of ' or " string. ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r'\\\r?\n'), StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r'\\\r?\n')) PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple) PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) # For a given string prefix plus quotes, endpats maps it to a regex # to match the remainder of that string. _prefix can be empty, for # a normal single or triple quoted string (with no prefix). endpats = {} for _prefix in _all_string_prefixes(): endpats[_prefix + "'"] = Single endpats[_prefix + '"'] = Double endpats[_prefix + "'''"] = Single3 endpats[_prefix + '"""'] = Double3 del _prefix # A set of all of the single and triple quoted string prefixes, # including the opening quotes. single_quoted = set() triple_quoted = set() for t in _all_string_prefixes(): for u in (t + '"', t + "'"): single_quoted.add(u) for u in (t + '"""', t + "'''"): triple_quoted.add(u) del t, u tabsize = 8 class TokenError(Exception): pass class StopTokenizing(Exception): pass class Untokenizer: def __init__(self): self.tokens = [] self.prev_row = 1 self.prev_col = 0 self.encoding = None def add_whitespace(self, start): row, col = start if row < self.prev_row or row == self.prev_row and col < self.prev_col: raise ValueError("start ({},{}) precedes previous end ({},{})" .format(row, col, self.prev_row, self.prev_col)) row_offset = row - self.prev_row if row_offset: self.tokens.append("\\\n" * row_offset) self.prev_col = 0 col_offset = col - self.prev_col if col_offset: self.tokens.append(" " * col_offset) def untokenize(self, iterable): it = iter(iterable) indents = [] startline = False for t in it: if len(t) == 2: self.compat(t, it) break tok_type, token, start, end, line = t if tok_type == ENCODING: self.encoding = token continue if tok_type == ENDMARKER: break if tok_type == INDENT: indents.append(token) continue elif tok_type == DEDENT: indents.pop() self.prev_row, self.prev_col = end continue elif tok_type in (NEWLINE, NL): startline = True elif startline and indents: indent = indents[-1] if start[1] >= len(indent): self.tokens.append(indent) self.prev_col = len(indent) startline = False self.add_whitespace(start) self.tokens.append(token) self.prev_row, self.prev_col = end if tok_type in (NEWLINE, NL): self.prev_row += 1 self.prev_col = 0 return "".join(self.tokens) def compat(self, token, iterable): indents = [] toks_append = self.tokens.append startline = token[0] in (NEWLINE, NL) prevstring = False for tok in _itertools.chain([token], iterable): toknum, tokval = tok[:2] if toknum == ENCODING: self.encoding = tokval continue if toknum in (NAME, NUMBER): tokval += ' ' # Insert a space between two consecutive strings if toknum == STRING: if prevstring: tokval = ' ' + tokval prevstring = True else: prevstring = False if toknum == INDENT: indents.append(tokval) continue elif toknum == DEDENT: indents.pop() continue elif toknum in (NEWLINE, NL): startline = True elif startline and indents: toks_append(indents[-1]) startline = False toks_append(tokval) def untokenize(iterable): """Transform tokens back into Python source code. It returns a bytes object, encoded using the ENCODING token, which is the first token sequence output by tokenize. 
Each element returned by the iterable must be a token sequence with at least two elements, a token number and token value. If only two tokens are passed, the resulting output is poor. Round-trip invariant for full input: Untokenized source will match input source exactly Round-trip invariant for limited input: # Output bytes will tokenize back to the input t1 = [tok[:2] for tok in tokenize(f.readline)] newcode = untokenize(t1) readline = BytesIO(newcode).readline t2 = [tok[:2] for tok in tokenize(readline)] assert t1 == t2 """ ut = Untokenizer() out = ut.untokenize(iterable) if ut.encoding is not None: out = out.encode(ut.encoding) return out def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. enc = orig_enc[:12].lower().replace("_", "-") if enc == "utf-8" or enc.startswith("utf-8-"): return "utf-8" if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): return "iso-8859-1" return orig_enc def detect_encoding(readline): """ The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argument, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. If the encoding cookie is an invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, 'utf-8-sig' is returned. If no encoding is specified, then the default of 'utf-8' will be returned. """ try: filename = readline.__self__.name except AttributeError: filename = None bom_found = False encoding = None default = 'utf-8' def read_or_stop(): try: return readline() except StopIteration: return b'' def find_cookie(line): try: # Decode as UTF-8. Either the line is an encoding declaration, # in which case it should be pure ASCII, or it must be UTF-8 # per default encoding. line_string = line.decode('utf-8') except UnicodeDecodeError: msg = "invalid or missing encoding declaration" if filename is not None: msg = '{} for {!r}'.format(msg, filename) raise SyntaxError(msg) match = cookie_re.match(line_string) if not match: return None encoding = _get_normal_name(match.group(1)) try: codec = lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter if filename is None: msg = "unknown encoding: " + encoding else: msg = "unknown encoding for {!r}: {}".format(filename, encoding) raise SyntaxError(msg) if bom_found: if encoding != 'utf-8': # This behaviour mimics the Python interpreter if filename is None: msg = 'encoding problem: utf-8' else: msg = 'encoding problem for {!r}: utf-8'.format(filename) raise SyntaxError(msg) encoding += '-sig' return encoding first = read_or_stop() if first.startswith(BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default, [] encoding = find_cookie(first) if encoding: return encoding, [first] if not blank_re.match(first): return default, [first] second = read_or_stop() if not second: return default, [first] encoding = find_cookie(second) if encoding: return encoding, [first, second] return default, [first, second] def open(filename): """Open a file in read only mode using the encoding detected by detect_encoding(). 
""" buffer = _builtin_open(filename, 'rb') try: encoding, lines = detect_encoding(buffer.readline) buffer.seek(0) text = TextIOWrapper(buffer, encoding, line_buffering=True) text.mode = 'r' return text except: buffer.close() raise def tokenize(readline): """ The tokenize() generator requires one argument, readline, which must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as bytes. Alternatively, readline can be a callable function terminating with StopIteration: readline = open(myfile, 'rb').__next__ # Example of alternate readline The generator produces 5-tuples with these members: the token type; the token string; a 2-tuple (srow, scol) of ints specifying the row and column where the token begins in the source; a 2-tuple (erow, ecol) of ints specifying the row and column where the token ends in the source; and the line on which the token was found. The line passed is the physical line. The first token sequence will always be an ENCODING token which tells you which encoding was used to decode the bytes stream. """ encoding, consumed = detect_encoding(readline) empty = _itertools.repeat(b"") rl_gen = _itertools.chain(consumed, iter(readline, b""), empty) return _tokenize(rl_gen.__next__, encoding) def _tokenize(readline, encoding): lnum = parenlev = continued = 0 numchars = '0123456789' contstr, needcont = '', 0 contline = None indents = [0] if encoding is not None: if encoding == "utf-8-sig": # BOM will already have been stripped. encoding = "utf-8" yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') last_line = b'' line = b'' while True: # loop over lines in stream try: # We capture the value of the line variable here because # readline uses the empty string '' to signal end of input, # hence `line` itself will always be overwritten at the end # of this loop. last_line = line line = readline() except StopIteration: line = b'' if encoding is not None: line = line.decode(encoding) lnum += 1 pos, max = 0, len(line) if contstr: # continued string if not line: raise TokenError("EOF in multi-line string", strstart) endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) yield TokenInfo(STRING, contstr + line[:end], strstart, (lnum, end), contline + line) contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': yield TokenInfo(ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline) contstr = '' contline = None continue else: contstr = contstr + line contline = contline + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column += 1 elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize elif line[pos] == '\f': column = 0 else: break pos += 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines if line[pos] == '#': comment_token = line[pos:].rstrip('\r\n') yield TokenInfo(COMMENT, comment_token, (lnum, pos), (lnum, pos + len(comment_token)), line) pos += len(comment_token) yield TokenInfo(NL, line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: if column not in indents: raise IndentationError( "unindent does not match any outer indentation level", ("
", lnum, pos, line)) indents = indents[:-1] yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: raise TokenError("EOF in multi-line statement", (lnum, 0)) continued = 0 while pos < max: pseudomatch = _compile(PseudoToken).match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = (lnum, start), (lnum, end), end if start == end: continue token, initial = line[start:end], line[start] if (initial in numchars or # ordinary number (initial == '.' and token != '.' and token != '...')): yield TokenInfo(NUMBER, token, spos, epos, line) elif initial in '\r\n': if parenlev > 0: yield TokenInfo(NL, token, spos, epos, line) else: yield TokenInfo(NEWLINE, token, spos, epos, line) elif initial == '#': assert not token.endswith("\n") yield TokenInfo(COMMENT, token, spos, epos, line) elif token in triple_quoted: endprog = _compile(endpats[token]) endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] yield TokenInfo(STRING, token, spos, (lnum, pos), line) else: strstart = (lnum, start) # multiple lines contstr = line[start:] contline = line break # Check up to the first 3 chars of the token to see if # they're in the single_quoted set. If so, they start # a string. # We're using the first 3, because we're looking for # "rb'" (for example) at the start of the token. If # we switch to longer prefixes, this needs to be # adjusted. # Note that initial == token[:1]. # Also note that single quote checking must come after # triple quote checking (above). elif (initial in single_quoted or token[:2] in single_quoted or token[:3] in single_quoted): if token[-1] == '\n': # continued string strstart = (lnum, start) # Again, using the first 3 chars of the # token. This is looking for the matching end # regex for the correct type of quote # character. So it's really looking for # endpats["'"] or endpats['"'], by trying to # skip string prefix characters, if any. endprog = _compile(endpats.get(initial) or endpats.get(token[1]) or endpats.get(token[2])) contstr, needcont = line[start:], 1 contline = line break else: # ordinary string yield TokenInfo(STRING, token, spos, epos, line) elif initial.isidentifier(): # ordinary name yield TokenInfo(NAME, token, spos, epos, line) elif initial == '\\': # continued stmt continued = 1 else: if initial in '([{': parenlev += 1 elif initial in ')]}': parenlev -= 1 yield TokenInfo(OP, token, spos, epos, line) else: yield TokenInfo(ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos+1), line) pos += 1 # Add an implicit NEWLINE if the input doesn't end in one if last_line and last_line[-1] not in '\r\n' and not last_line.strip().startswith("#"): yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '') for indent in indents[1:]: # pop remaining indent levels yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') def generate_tokens(readline): """Tokenize a source reading Python code as unicode strings. This has the same API as tokenize(), except that it expects the *readline* callable to return str objects instead of bytes. 
""" return _tokenize(readline, None) def main(): import argparse # Helper error handling routines def perror(message): sys.stderr.write(message) sys.stderr.write('\n') def error(message, filename=None, location=None): if location: args = (filename,) + location + (message,) perror("%s:%d:%d: error: %s" % args) elif filename: perror("%s: error: %s" % (filename, message)) else: perror("error: %s" % message) sys.exit(1) # Parse the arguments and options parser = argparse.ArgumentParser(prog='python -m tokenize') parser.add_argument(dest='filename', nargs='?', metavar='filename.py', help='the file to tokenize; defaults to stdin') parser.add_argument('-e', '--exact', dest='exact', action='store_true', help='display token names using the exact type') args = parser.parse_args() try: # Tokenize the input if args.filename: filename = args.filename with _builtin_open(filename, 'rb') as f: tokens = list(tokenize(f.readline)) else: filename = "
" tokens = _tokenize(sys.stdin.readline, None) # Output the tokenization for token in tokens: token_type = token.type if args.exact: token_type = token.exact_type token_range = "%d,%d-%d,%d:" % (token.start + token.end) print("%-20s%-15s%-15r" % (token_range, tok_name[token_type], token.string)) except IndentationError as err: line, column = err.args[1][1:3] error(err.args[0], filename, (line, column)) except TokenError as err: line, column = err.args[1] error(err.args[0], filename, (line, column)) except SyntaxError as err: error(err, filename) except OSError as err: error(err) except KeyboardInterrupt: print("interrupted\n") except Exception as err: perror("unexpected error: %s" % err) raise def _generate_tokens_from_c_tokenizer(source): """Tokenize a source reading Python code as unicode strings using the internal C tokenizer""" import _tokenize as c_tokenizer for info in c_tokenizer.TokenizerIter(source): tok, type, lineno, end_lineno, col_off, end_col_off, line = info yield TokenInfo(type, tok, (lineno, col_off), (end_lineno, end_col_off), line) if __name__ == "__main__": main()
Directory listing of /usr/lib/python3.11:

Name                                        Size / Items
---------------------------------------------------------
.                                           204 Items
..                                          72 Items
EXTERNALLY-MANAGED                          0.63 KB
LICENSE.txt                                 13.61 KB
__future__.py                               5.1 KB
__hello__.py                                0.22 KB
__phello__                                  3 Items
__pycache__                                 171 Items
_aix_support.py                             3.31 KB
_bootsubprocess.py                          2.61 KB
_collections_abc.py                         29.49 KB
_compat_pickle.py                           8.56 KB
_compression.py                             5.55 KB
_distutils_system_mod.py                    6.16 KB
_markupbase.py                              14.31 KB
_osx_support.py                             21.28 KB
_py_abc.py                                  6.04 KB
_pydecimal.py                               223.83 KB
_pyio.py                                    91.83 KB
_sitebuiltins.py                            3.05 KB
_strptime.py                                24.68 KB
_sysconfigdata__linux_x86_64-linux-gnu.py   42.36 KB
_sysconfigdata__x86_64-linux-gnu.py         42.36 KB
_threading_local.py                         7.05 KB
_weakrefset.py                              5.75 KB
abc.py                                      6.37 KB
aifc.py                                     33.41 KB
antigravity.py                              0.49 KB
argparse.py                                 97.28 KB
ast.py                                      59.25 KB
asynchat.py                                 11.3 KB
asyncio                                     34 Items
asyncore.py                                 19.83 KB
base64.py                                   20.53 KB
bdb.py                                      31.59 KB
bisect.py                                   3.06 KB
bz2.py                                      11.57 KB
cProfile.py                                 6.19 KB
calendar.py                                 24.17 KB
cgi.py                                      33.61 KB
cgitb.py                                    12.13 KB
chunk.py                                    5.37 KB
cmd.py                                      14.52 KB
code.py                                     10.37 KB
codecs.py                                   35.85 KB
codeop.py                                   5.47 KB
collections                                 3 Items
colorsys.py                                 3.93 KB
compileall.py                               19.78 KB
concurrent                                  3 Items
configparser.py                             53.96 KB
contextlib.py                               26.44 KB
contextvars.py                              0.13 KB
copy.py                                     8.48 KB
copyreg.py                                  7.5 KB
crypt.py                                    3.82 KB
csv.py                                      15.65 KB
ctypes                                      6 Items
curses                                      6 Items
dataclasses.py                              56.5 KB
datetime.py                                 89.85 KB
dbm                                         5 Items
decimal.py                                  0.31 KB
difflib.py                                  81.36 KB
dis.py                                      28.28 KB
distutils                                   31 Items
doctest.py                                  102.71 KB
email                                       23 Items
encodings                                   123 Items
enum.py                                     76.81 KB
filecmp.py                                  9.94 KB
fileinput.py                                15.33 KB
fnmatch.py                                  5.86 KB
fractions.py                                28 KB
ftplib.py                                   34.66 KB
functools.py                                37.51 KB
genericpath.py                              4.86 KB
getopt.py                                   7.31 KB
getpass.py                                  5.85 KB
gettext.py                                  20.8 KB
glob.py                                     8.48 KB
graphlib.py                                 9.43 KB
gzip.py                                     23.51 KB
hashlib.py                                  11.49 KB
heapq.py                                    22.48 KB
hmac.py                                     7.54 KB
html                                        4 Items
http                                        6 Items
imaplib.py                                  53.58 KB
imghdr.py                                   3.86 KB
imp.py                                      10.36 KB
importlib                                   12 Items
inspect.py                                  121.28 KB
io.py                                       4.14 KB
ipaddress.py                                76.45 KB
json                                        6 Items
keyword.py                                  1.04 KB
lib-dynload                                 46 Items
lib2to3                                     16 Items
linecache.py                                5.56 KB
locale.py                                   77.15 KB
logging                                     4 Items
lzma.py                                     12.97 KB
mailbox.py                                  76.95 KB
mailcap.py                                  9.15 KB
mimetypes.py                                22.26 KB
modulefinder.py                             23.14 KB
multiprocessing                             23 Items
netrc.py                                    6.77 KB
nntplib.py                                  40.12 KB
ntpath.py                                   28.95 KB
nturl2path.py                               2.82 KB
numbers.py                                  10.11 KB
opcode.py                                   10.2 KB
operator.py                                 10.71 KB
optparse.py                                 58.95 KB
os.py                                       38.58 KB
pathlib.py                                  47.44 KB
pdb.py                                      62.4 KB
pickle.py                                   63.43 KB
pickletools.py                              91.29 KB
pipes.py                                    8.77 KB
pkgutil.py                                  24.04 KB
platform.py                                 41.28 KB
plistlib.py                                 27.59 KB
poplib.py                                   14.84 KB
posixpath.py                                16.61 KB
pprint.py                                   23.92 KB
profile.py                                  22.33 KB
pstats.py                                   28.67 KB
pty.py                                      5.09 KB
py_compile.py                               7.69 KB
pyclbr.py                                   11.13 KB
pydoc.py                                    106.57 KB
pydoc_data                                  4 Items
queue.py                                    11.23 KB
quopri.py                                   7.1 KB
random.py                                   31.41 KB
re                                          6 Items
reprlib.py                                  5.31 KB
rlcompleter.py                              7.64 KB
runpy.py                                    12.85 KB
sched.py                                    6.2 KB
secrets.py                                  1.98 KB
selectors.py                                19.03 KB
shelve.py                                   8.36 KB
shlex.py                                    13.18 KB
shutil.py                                   53.58 KB
signal.py                                   2.38 KB
site.py                                     23.17 KB
sitecustomize.py                            0.15 KB
smtpd.py                                    30.43 KB
smtplib.py                                  44.35 KB
sndhdr.py                                   7.27 KB
socket.py                                   36.41 KB
socketserver.py                             26.94 KB
sqlite3                                     4 Items
sre_compile.py                              0.23 KB
sre_constants.py                            0.23 KB
sre_parse.py                                0.22 KB
ssl.py                                      52.71 KB
stat.py                                     5.36 KB
statistics.py                               46.59 KB
string.py                                   11.51 KB
stringprep.py                               12.61 KB
struct.py                                   0.25 KB
subprocess.py                               83.74 KB
sunau.py                                    18.05 KB
symtable.py                                 10.13 KB
sysconfig.py                                31.35 KB
tabnanny.py                                 11.03 KB
tarfile.py                                  95.25 KB
telnetlib.py                                22.75 KB
tempfile.py                                 34.66 KB
test                                        10 Items
textwrap.py                                 19.26 KB
this.py                                     0.98 KB
threading.py                                56.46 KB
timeit.py                                   13.18 KB
token.py                                    2.33 KB
tokenize.py                                 25.72 KB
tomllib                                     5 Items
trace.py                                    28.52 KB
traceback.py                                37.52 KB
tracemalloc.py                              17.62 KB
tty.py                                      0.86 KB
turtle.py                                   140.97 KB
types.py                                    9.83 KB
typing.py                                   114.35 KB
unittest                                    14 Items
urllib                                      7 Items
uu.py                                       6.86 KB
uuid.py                                     26.95 KB
venv                                        4 Items
warnings.py                                 20.53 KB
wave.py                                     21.33 KB
weakref.py                                  21.01 KB
webbrowser.py                               24.5 KB
wsgiref                                     8 Items
xdrlib.py                                   5.84 KB
xml                                         6 Items
xmlrpc                                      4 Items
zipapp.py                                   7.36 KB
zipfile.py                                  90.81 KB
zipimport.py                                30.17 KB
zoneinfo                                    5 Items