Linux server1.sbs.cy 5.14.0-362.18.1.el9_3.x86_64 #1 SMP PREEMPT_DYNAMIC Mon Jan 29 07:05:48 EST 2024 x86_64
Web server : Apache
IP : 199.192.25.12 | IP : 172.70.127.199
Domains : 28
PHP : 8.1.31
User : administrator
www.github.com/MadExploits
[ HOME SHELL ] /usr/lib64/python3.9/
Name | Size | Permission
__pycache__ | [ DIR ] | drwxr-xr-x
asyncio | [ DIR ] | drwxr-xr-x
collections | [ DIR ] | drwxr-xr-x
concurrent | [ DIR ] | drwxr-xr-x
config-3.9-x86_64-linux-gnu | [ DIR ] | drwxr-xr-x
ctypes | [ DIR ] | drwxr-xr-x
curses | [ DIR ] | drwxr-xr-x
dbm | [ DIR ] | drwxr-xr-x
distutils | [ DIR ] | drwxr-xr-x
email | [ DIR ] | drwxr-xr-x
encodings | [ DIR ] | drwxr-xr-x
ensurepip | [ DIR ] | drwxr-xr-x
html | [ DIR ] | drwxr-xr-x
http | [ DIR ] | drwxr-xr-x
importlib | [ DIR ] | drwxr-xr-x
json | [ DIR ] | drwxr-xr-x
lib-dynload | [ DIR ] | drwxr-xr-x
lib2to3 | [ DIR ] | drwxr-xr-x
logging | [ DIR ] | drwxr-xr-x
multiprocessing | [ DIR ] | drwxr-xr-x
pydoc_data | [ DIR ] | drwxr-xr-x
site-packages | [ DIR ] | drwxr-xr-x
sqlite3 | [ DIR ] | drwxr-xr-x
unittest | [ DIR ] | drwxr-xr-x
urllib | [ DIR ] | drwxr-xr-x
venv | [ DIR ] | drwxr-xr-x
wsgiref | [ DIR ] | drwxr-xr-x
xml | [ DIR ] | drwxr-xr-x
xmlrpc | [ DIR ] | drwxr-xr-x
zoneinfo | [ DIR ] | drwxr-xr-x
LICENSE.txt | 13.61 KB | -rw-r--r--
__future__.py | 5.03 KB | -rw-r--r--
__phello__.foo.py | 64 B | -rw-r--r--
_aix_support.py | 3.31 KB | -rw-r--r--
_bootlocale.py | 1.76 KB | -rw-r--r--
_bootsubprocess.py | 2.61 KB | -rw-r--r--
_collections_abc.py | 28.69 KB | -rw-r--r--
_compat_pickle.py | 8.54 KB | -rw-r--r--
_compression.py | 5.21 KB | -rw-r--r--
_markupbase.py | 14.28 KB | -rw-r--r--
_osx_support.py | 21.26 KB | -rw-r--r--
_py_abc.py | 6.04 KB | -rw-r--r--
_pydecimal.py | 223.31 KB | -rw-r--r--
_pyio.py | 91.13 KB | -rw-r--r--
_sitebuiltins.py | 3.04 KB | -rw-r--r--
_strptime.py | 24.68 KB | -rw-r--r--
_sysconfigdata__linux_x86_64-l... | 40.27 KB | -rw-r--r--
_sysconfigdata_d_linux_x86_64-... | 40.08 KB | -rw-r--r--
_threading_local.py | 7.05 KB | -rw-r--r--
_weakrefset.py | 5.78 KB | -rw-r--r--
abc.py | 4.8 KB | -rw-r--r--
aifc.py | 31.84 KB | -rw-r--r--
antigravity.py | 500 B | -rw-r--r--
argparse.py | 95.82 KB | -rw-r--r--
ast.py | 54.94 KB | -rw-r--r--
asynchat.py | 11.06 KB | -rw-r--r--
asyncore.py | 19.63 KB | -rw-r--r--
base64.py | 19.39 KB | -rwxr-xr-x
bdb.py | 30.65 KB | -rw-r--r--
binhex.py | 14.44 KB | -rw-r--r--
bisect.py | 2.29 KB | -rw-r--r--
bz2.py | 12.16 KB | -rw-r--r--
cProfile.py | 6.2 KB | -rwxr-xr-x
calendar.py | 24.25 KB | -rw-r--r--
cgi.py | 33.14 KB | -rwxr-xr-x
cgitb.py | 11.81 KB | -rw-r--r--
chunk.py | 5.31 KB | -rw-r--r--
cmd.py | 14.51 KB | -rw-r--r--
code.py | 10.37 KB | -rw-r--r--
codecs.py | 35.81 KB | -rw-r--r--
codeop.py | 6.18 KB | -rw-r--r--
colorsys.py | 3.97 KB | -rw-r--r--
compileall.py | 19.63 KB | -rw-r--r--
configparser.py | 53.3 KB | -rw-r--r--
contextlib.py | 24.05 KB | -rw-r--r--
contextvars.py | 129 B | -rw-r--r--
copy.py | 8.45 KB | -rw-r--r--
copyreg.py | 7.1 KB | -rw-r--r--
crypt.py | 3.73 KB | -rw-r--r--
csv.py | 15.77 KB | -rw-r--r--
dataclasses.py | 48.42 KB | -rw-r--r--
datetime.py | 87.09 KB | -rw-r--r--
decimal.py | 320 B | -rw-r--r--
difflib.py | 81.35 KB | -rw-r--r--
dis.py | 20.09 KB | -rw-r--r--
doctest.py | 102.12 KB | -rw-r--r--
enum.py | 38.52 KB | -rw-r--r--
filecmp.py | 9.79 KB | -rw-r--r--
fileinput.py | 14.44 KB | -rw-r--r--
fnmatch.py | 5.86 KB | -rw-r--r--
formatter.py | 14.79 KB | -rw-r--r--
fractions.py | 23.75 KB | -rw-r--r--
ftplib.py | 34.66 KB | -rw-r--r--
functools.py | 37.97 KB | -rw-r--r--
genericpath.py | 4.86 KB | -rw-r--r--
getopt.py | 7.31 KB | -rw-r--r--
getpass.py | 5.85 KB | -rw-r--r--
gettext.py | 26.63 KB | -rw-r--r--
glob.py | 5.69 KB | -rw-r--r--
graphlib.py | 9.35 KB | -rw-r--r--
gzip.py | 21.26 KB | -rw-r--r--
hashlib.py | 7.88 KB | -rw-r--r--
heapq.py | 22.34 KB | -rw-r--r--
hmac.py | 7.85 KB | -rw-r--r--
imaplib.py | 53.62 KB | -rw-r--r--
imghdr.py | 3.72 KB | -rw-r--r--
imp.py | 10.29 KB | -rw-r--r--
inspect.py | 115.46 KB | -rw-r--r--
io.py | 3.46 KB | -rw-r--r--
ipaddress.py | 76.79 KB | -rw-r--r--
keyword.py | 1.02 KB | -rw-r--r--
linecache.py | 5.33 KB | -rw-r--r--
locale.py | 76.44 KB | -rw-r--r--
lzma.py | 12.92 KB | -rw-r--r--
mailbox.py | 76.95 KB | -rw-r--r--
mailcap.py | 8.9 KB | -rw-r--r--
mimetypes.py | 21.06 KB | -rw-r--r--
modulefinder.py | 23.83 KB | -rw-r--r--
netrc.py | 5.44 KB | -rw-r--r--
nntplib.py | 40.06 KB | -rw-r--r--
ntpath.py | 27.08 KB | -rw-r--r--
nturl2path.py | 2.82 KB | -rw-r--r--
numbers.py | 10.1 KB | -rw-r--r--
opcode.py | 5.53 KB | -rw-r--r--
operator.py | 10.5 KB | -rw-r--r--
optparse.py | 58.95 KB | -rw-r--r--
os.py | 38.15 KB | -rw-r--r--
pathlib.py | 52.81 KB | -rw-r--r--
pdb.py | 61.75 KB | -rwxr-xr-x
pickle.py | 63.4 KB | -rw-r--r--
pickletools.py | 91.29 KB | -rw-r--r--
pipes.py | 8.71 KB | -rw-r--r--
pkgutil.py | 23.71 KB | -rw-r--r--
platform.py | 39.65 KB | -rwxr-xr-x
plistlib.py | 27.59 KB | -rw-r--r--
poplib.py | 14.84 KB | -rw-r--r--
posixpath.py | 15.35 KB | -rw-r--r--
pprint.py | 22 KB | -rw-r--r--
profile.py | 22.34 KB | -rwxr-xr-x
pstats.py | 28.64 KB | -rw-r--r--
pty.py | 4.69 KB | -rw-r--r--
py_compile.py | 8.01 KB | -rw-r--r--
pyclbr.py | 14.9 KB | -rw-r--r--
pydoc.py | 107.03 KB | -rwxr-xr-x
queue.py | 11.23 KB | -rw-r--r--
quopri.py | 7.1 KB | -rwxr-xr-x
random.py | 30.75 KB | -rw-r--r--
re.py | 15.49 KB | -rw-r--r--
reprlib.py | 5.14 KB | -rw-r--r--
rlcompleter.py | 7.47 KB | -rw-r--r--
runpy.py | 12.78 KB | -rw-r--r--
sched.py | 6.29 KB | -rw-r--r--
secrets.py | 1.99 KB | -rw-r--r--
selectors.py | 19.08 KB | -rw-r--r--
shelve.py | 8.33 KB | -rw-r--r--
shlex.py | 13.18 KB | -rw-r--r--
shutil.py | 51.79 KB | -rw-r--r--
signal.py | 2.38 KB | -rw-r--r--
site.py | 21.57 KB | -rw-r--r--
smtpd.py | 34 KB | -rwxr-xr-x
smtplib.py | 44.34 KB | -rwxr-xr-x
sndhdr.py | 6.93 KB | -rw-r--r--
socket.py | 36.05 KB | -rw-r--r--
socketserver.py | 26.66 KB | -rw-r--r--
sre_compile.py | 27.32 KB | -rw-r--r--
sre_constants.py | 7.01 KB | -rw-r--r--
sre_parse.py | 39.82 KB | -rw-r--r--
ssl.py | 51.3 KB | -rw-r--r--
stat.py | 5.36 KB | -rw-r--r--
statistics.py | 37.17 KB | -rw-r--r--
string.py | 10.32 KB | -rw-r--r--
stringprep.py | 12.61 KB | -rw-r--r--
struct.py | 257 B | -rw-r--r--
subprocess.py | 81.61 KB | -rw-r--r--
sunau.py | 17.73 KB | -rw-r--r--
symbol.py | 2.23 KB | -rw-r--r--
symtable.py | 7.72 KB | -rw-r--r--
sysconfig.py | 24.96 KB | -rw-r--r--
tabnanny.py | 11.14 KB | -rwxr-xr-x
tarfile.py | 106.31 KB | -rwxr-xr-x
telnetlib.py | 22.71 KB | -rw-r--r--
tempfile.py | 27.31 KB | -rw-r--r--
textwrap.py | 18.95 KB | -rw-r--r--
this.py | 1003 B | -rw-r--r--
threading.py | 52.91 KB | -rw-r--r--
timeit.py | 13.16 KB | -rwxr-xr-x
token.py | 2.31 KB | -rw-r--r--
tokenize.py | 25.28 KB | -rw-r--r--
trace.py | 28.52 KB | -rwxr-xr-x
traceback.py | 24.08 KB | -rw-r--r--
tracemalloc.py | 17.62 KB | -rw-r--r--
tty.py | 879 B | -rw-r--r--
types.py | 9.56 KB | -rw-r--r--
typing.py | 75.24 KB | -rw-r--r--
uu.py | 7.11 KB | -rw-r--r--
uuid.py | 26.68 KB | -rw-r--r--
warnings.py | 19.23 KB | -rw-r--r--
wave.py | 17.58 KB | -rw-r--r--
weakref.py | 21.05 KB | -rw-r--r--
webbrowser.py | 23.52 KB | -rwxr-xr-x
xdrlib.py | 5.77 KB | -rw-r--r--
zipapp.py | 7.36 KB | -rw-r--r--
zipfile.py | 86.17 KB | -rw-r--r--
zipimport.py | 30.04 KB | -rw-r--r--
Code Editor : sre_compile.py
#
# Secret Labs' Regular Expression Engine
#
# convert template to internal format
#
# Copyright (c) 1997-2001 by Secret Labs AB.  All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#

"""Internal support module for sre"""

import _sre
import sre_parse
from sre_constants import *

assert _sre.MAGIC == MAGIC, "SRE module mismatch"

_LITERAL_CODES = {LITERAL, NOT_LITERAL}
_REPEATING_CODES = {REPEAT, MIN_REPEAT, MAX_REPEAT}
_SUCCESS_CODES = {SUCCESS, FAILURE}
_ASSERT_CODES = {ASSERT, ASSERT_NOT}
_UNIT_CODES = _LITERAL_CODES | {ANY, IN}

# Sets of lowercase characters which have the same uppercase.
_equivalences = (
    # LATIN SMALL LETTER I, LATIN SMALL LETTER DOTLESS I
    (0x69, 0x131), # iı
    # LATIN SMALL LETTER S, LATIN SMALL LETTER LONG S
    (0x73, 0x17f), # sſ
    # MICRO SIGN, GREEK SMALL LETTER MU
    (0xb5, 0x3bc), # µμ
    # COMBINING GREEK YPOGEGRAMMENI, GREEK SMALL LETTER IOTA, GREEK PROSGEGRAMMENI
    (0x345, 0x3b9, 0x1fbe), # \u0345ιι
    # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA
    (0x390, 0x1fd3), # ΐΐ
    # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA
    (0x3b0, 0x1fe3), # ΰΰ
    # GREEK SMALL LETTER BETA, GREEK BETA SYMBOL
    (0x3b2, 0x3d0), # βϐ
    # GREEK SMALL LETTER EPSILON, GREEK LUNATE EPSILON SYMBOL
    (0x3b5, 0x3f5), # εϵ
    # GREEK SMALL LETTER THETA, GREEK THETA SYMBOL
    (0x3b8, 0x3d1), # θϑ
    # GREEK SMALL LETTER KAPPA, GREEK KAPPA SYMBOL
    (0x3ba, 0x3f0), # κϰ
    # GREEK SMALL LETTER PI, GREEK PI SYMBOL
    (0x3c0, 0x3d6), # πϖ
    # GREEK SMALL LETTER RHO, GREEK RHO SYMBOL
    (0x3c1, 0x3f1), # ρϱ
    # GREEK SMALL LETTER FINAL SIGMA, GREEK SMALL LETTER SIGMA
    (0x3c2, 0x3c3), # ςσ
    # GREEK SMALL LETTER PHI, GREEK PHI SYMBOL
    (0x3c6, 0x3d5), # φϕ
    # CYRILLIC SMALL LETTER VE, CYRILLIC SMALL LETTER ROUNDED VE
    (0x432, 0x1c80), # вᲀ
    # CYRILLIC SMALL LETTER DE, CYRILLIC SMALL LETTER LONG-LEGGED DE
    (0x434, 0x1c81), # дᲁ
    # CYRILLIC SMALL LETTER O, CYRILLIC SMALL LETTER NARROW O
    (0x43e, 0x1c82), # оᲂ
    # CYRILLIC SMALL LETTER ES, CYRILLIC SMALL LETTER WIDE ES
    (0x441, 0x1c83), # сᲃ
    # CYRILLIC SMALL LETTER TE, CYRILLIC SMALL LETTER TALL TE, CYRILLIC SMALL LETTER THREE-LEGGED TE
    (0x442, 0x1c84, 0x1c85), # тᲄᲅ
    # CYRILLIC SMALL LETTER HARD SIGN, CYRILLIC SMALL LETTER TALL HARD SIGN
    (0x44a, 0x1c86), # ъᲆ
    # CYRILLIC SMALL LETTER YAT, CYRILLIC SMALL LETTER TALL YAT
    (0x463, 0x1c87), # ѣᲇ
    # CYRILLIC SMALL LETTER UNBLENDED UK, CYRILLIC SMALL LETTER MONOGRAPH UK
    (0x1c88, 0xa64b), # ᲈꙋ
    # LATIN SMALL LETTER S WITH DOT ABOVE, LATIN SMALL LETTER LONG S WITH DOT ABOVE
    (0x1e61, 0x1e9b), # ṡẛ
    # LATIN SMALL LIGATURE LONG S T, LATIN SMALL LIGATURE ST
    (0xfb05, 0xfb06), # ſtst
)

# Maps the lowercase code to lowercase codes which have the same uppercase.
_ignorecase_fixes = {i: tuple(j for j in t if i != j)
                     for t in _equivalences for i in t}

def _combine_flags(flags, add_flags, del_flags,
                   TYPE_FLAGS=sre_parse.TYPE_FLAGS):
    if add_flags & TYPE_FLAGS:
        flags &= ~TYPE_FLAGS
    return (flags | add_flags) & ~del_flags

def _compile(code, pattern, flags):
    # internal: compile a (sub)pattern
    emit = code.append
    _len = len
    LITERAL_CODES = _LITERAL_CODES
    REPEATING_CODES = _REPEATING_CODES
    SUCCESS_CODES = _SUCCESS_CODES
    ASSERT_CODES = _ASSERT_CODES
    iscased = None
    tolower = None
    fixes = None
    if flags & SRE_FLAG_IGNORECASE and not flags & SRE_FLAG_LOCALE:
        if flags & SRE_FLAG_UNICODE:
            iscased = _sre.unicode_iscased
            tolower = _sre.unicode_tolower
            fixes = _ignorecase_fixes
        else:
            iscased = _sre.ascii_iscased
            tolower = _sre.ascii_tolower
    for op, av in pattern:
        if op in LITERAL_CODES:
            if not flags & SRE_FLAG_IGNORECASE:
                emit(op)
                emit(av)
            elif flags & SRE_FLAG_LOCALE:
                emit(OP_LOCALE_IGNORE[op])
                emit(av)
            elif not iscased(av):
                emit(op)
                emit(av)
            else:
                lo = tolower(av)
                if not fixes:  # ascii
                    emit(OP_IGNORE[op])
                    emit(lo)
                elif lo not in fixes:
                    emit(OP_UNICODE_IGNORE[op])
                    emit(lo)
                else:
                    emit(IN_UNI_IGNORE)
                    skip = _len(code); emit(0)
                    if op is NOT_LITERAL:
                        emit(NEGATE)
                    for k in (lo,) + fixes[lo]:
                        emit(LITERAL)
                        emit(k)
                    emit(FAILURE)
                    code[skip] = _len(code) - skip
        elif op is IN:
            charset, hascased = _optimize_charset(av, iscased, tolower, fixes)
            if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE:
                emit(IN_LOC_IGNORE)
            elif not hascased:
                emit(IN)
            elif not fixes:  # ascii
                emit(IN_IGNORE)
            else:
                emit(IN_UNI_IGNORE)
            skip = _len(code); emit(0)
            _compile_charset(charset, flags, code)
            code[skip] = _len(code) - skip
        elif op is ANY:
            if flags & SRE_FLAG_DOTALL:
                emit(ANY_ALL)
            else:
                emit(ANY)
        elif op in REPEATING_CODES:
            if flags & SRE_FLAG_TEMPLATE:
                raise error("internal: unsupported template operator %r" % (op,))
            if _simple(av[2]):
                if op is MAX_REPEAT:
                    emit(REPEAT_ONE)
                else:
                    emit(MIN_REPEAT_ONE)
                skip = _len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                emit(SUCCESS)
                code[skip] = _len(code) - skip
            else:
                emit(REPEAT)
                skip = _len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                code[skip] = _len(code) - skip
                if op is MAX_REPEAT:
                    emit(MAX_UNTIL)
                else:
                    emit(MIN_UNTIL)
        elif op is SUBPATTERN:
            group, add_flags, del_flags, p = av
            if group:
                emit(MARK)
                emit((group-1)*2)
            # _compile_info(code, p, _combine_flags(flags, add_flags, del_flags))
            _compile(code, p, _combine_flags(flags, add_flags, del_flags))
            if group:
                emit(MARK)
                emit((group-1)*2+1)
        elif op in SUCCESS_CODES:
            emit(op)
        elif op in ASSERT_CODES:
            emit(op)
            skip = _len(code); emit(0)
            if av[0] >= 0:
                emit(0) # look ahead
            else:
                lo, hi = av[1].getwidth()
                if lo != hi:
                    raise error("look-behind requires fixed-width pattern")
                emit(lo) # look behind
            _compile(code, av[1], flags)
            emit(SUCCESS)
            code[skip] = _len(code) - skip
        elif op is CALL:
            emit(op)
            skip = _len(code); emit(0)
            _compile(code, av, flags)
            emit(SUCCESS)
            code[skip] = _len(code) - skip
        elif op is AT:
            emit(op)
            if flags & SRE_FLAG_MULTILINE:
                av = AT_MULTILINE.get(av, av)
            if flags & SRE_FLAG_LOCALE:
                av = AT_LOCALE.get(av, av)
            elif flags & SRE_FLAG_UNICODE:
                av = AT_UNICODE.get(av, av)
            emit(av)
        elif op is BRANCH:
            emit(op)
            tail = []
            tailappend = tail.append
            for av in av[1]:
                skip = _len(code); emit(0)
                # _compile_info(code, av, flags)
                _compile(code, av, flags)
                emit(JUMP)
                tailappend(_len(code)); emit(0)
                code[skip] = _len(code) - skip
            emit(FAILURE) # end of branch
            for tail in tail:
                code[tail] = _len(code) - tail
        elif op is CATEGORY:
            emit(op)
            if flags & SRE_FLAG_LOCALE:
                av = CH_LOCALE[av]
            elif flags & SRE_FLAG_UNICODE:
                av = CH_UNICODE[av]
            emit(av)
        elif op is GROUPREF:
            if not flags & SRE_FLAG_IGNORECASE:
                emit(op)
            elif flags & SRE_FLAG_LOCALE:
                emit(GROUPREF_LOC_IGNORE)
            elif not fixes:  # ascii
                emit(GROUPREF_IGNORE)
            else:
                emit(GROUPREF_UNI_IGNORE)
            emit(av-1)
        elif op is GROUPREF_EXISTS:
            emit(op)
            emit(av[0]-1)
            skipyes = _len(code); emit(0)
            _compile(code, av[1], flags)
            if av[2]:
                emit(JUMP)
                skipno = _len(code); emit(0)
                code[skipyes] = _len(code) - skipyes + 1
                _compile(code, av[2], flags)
                code[skipno] = _len(code) - skipno
            else:
                code[skipyes] = _len(code) - skipyes + 1
        else:
            raise error("internal: unsupported operand type %r" % (op,))

def _compile_charset(charset, flags, code):
    # compile charset subprogram
    emit = code.append
    for op, av in charset:
        emit(op)
        if op is NEGATE:
            pass
        elif op is LITERAL:
            emit(av)
        elif op is RANGE or op is RANGE_UNI_IGNORE:
            emit(av[0])
            emit(av[1])
        elif op is CHARSET:
            code.extend(av)
        elif op is BIGCHARSET:
            code.extend(av)
        elif op is CATEGORY:
            if flags & SRE_FLAG_LOCALE:
                emit(CH_LOCALE[av])
            elif flags & SRE_FLAG_UNICODE:
                emit(CH_UNICODE[av])
            else:
                emit(av)
        else:
            raise error("internal: unsupported set operator %r" % (op,))
    emit(FAILURE)

def _optimize_charset(charset, iscased=None, fixup=None, fixes=None):
    # internal: optimize character set
    out = []
    tail = []
    charmap = bytearray(256)
    hascased = False
    for op, av in charset:
        while True:
            try:
                if op is LITERAL:
                    if fixup:
                        lo = fixup(av)
                        charmap[lo] = 1
                        if fixes and lo in fixes:
                            for k in fixes[lo]:
                                charmap[k] = 1
                        if not hascased and iscased(av):
                            hascased = True
                    else:
                        charmap[av] = 1
                elif op is RANGE:
                    r = range(av[0], av[1]+1)
                    if fixup:
                        if fixes:
                            for i in map(fixup, r):
                                charmap[i] = 1
                                if i in fixes:
                                    for k in fixes[i]:
                                        charmap[k] = 1
                        else:
                            for i in map(fixup, r):
                                charmap[i] = 1
                        if not hascased:
                            hascased = any(map(iscased, r))
                    else:
                        for i in r:
                            charmap[i] = 1
                elif op is NEGATE:
                    out.append((op, av))
                else:
                    tail.append((op, av))
            except IndexError:
                if len(charmap) == 256:
                    # character set contains non-UCS1 character codes
                    charmap += b'\0' * 0xff00
                    continue
                # Character set contains non-BMP character codes.
                # For range, all BMP characters in the range are already
                # proceeded.
                if fixup:
                    hascased = True
                    # For now, IN_UNI_IGNORE+LITERAL and
                    # IN_UNI_IGNORE+RANGE_UNI_IGNORE work for all non-BMP
                    # characters, because two characters (at least one of
                    # which is not in the BMP) match case-insensitively
                    # if and only if:
                    # 1) c1.lower() == c2.lower()
                    # 2) c1.lower() == c2 or c1.lower().upper() == c2
                    # Also, both c.lower() and c.lower().upper() are single
                    # characters for every non-BMP character.
                    if op is RANGE:
                        op = RANGE_UNI_IGNORE
                tail.append((op, av))
            break

    # compress character map
    runs = []
    q = 0
    while True:
        p = charmap.find(1, q)
        if p < 0:
            break
        if len(runs) >= 2:
            runs = None
            break
        q = charmap.find(0, p)
        if q < 0:
            runs.append((p, len(charmap)))
            break
        runs.append((p, q))
    if runs is not None:
        # use literal/range
        for p, q in runs:
            if q - p == 1:
                out.append((LITERAL, p))
            else:
                out.append((RANGE, (p, q - 1)))
        out += tail
        # if the case was changed or new representation is more compact
        if hascased or len(out) < len(charset):
            return out, hascased
        # else original character set is good enough
        return charset, hascased

    # use bitmap
    if len(charmap) == 256:
        data = _mk_bitmap(charmap)
        out.append((CHARSET, data))
        out += tail
        return out, hascased

    # To represent a big charset, first a bitmap of all characters in the
    # set is constructed. Then, this bitmap is sliced into chunks of 256
    # characters, duplicate chunks are eliminated, and each chunk is
    # given a number. In the compiled expression, the charset is
    # represented by a 32-bit word sequence, consisting of one word for
    # the number of different chunks, a sequence of 256 bytes (64 words)
    # of chunk numbers indexed by their original chunk position, and a
    # sequence of 256-bit chunks (8 words each).

    # Compression is normally good: in a typical charset, large ranges of
    # Unicode will be either completely excluded (e.g. if only cyrillic
    # letters are to be matched), or completely included (e.g. if large
    # subranges of Kanji match). These ranges will be represented by
    # chunks of all one-bits or all zero-bits.

    # Matching can be also done efficiently: the more significant byte of
    # the Unicode character is an index into the chunk number, and the
    # less significant byte is a bit index in the chunk (just like the
    # CHARSET matching).

    charmap = bytes(charmap) # should be hashable
    comps = {}
    mapping = bytearray(256)
    block = 0
    data = bytearray()
    for i in range(0, 65536, 256):
        chunk = charmap[i: i + 256]
        if chunk in comps:
            mapping[i // 256] = comps[chunk]
        else:
            mapping[i // 256] = comps[chunk] = block
            block += 1
            data += chunk
    data = _mk_bitmap(data)
    data[0:0] = [block] + _bytes_to_codes(mapping)
    out.append((BIGCHARSET, data))
    out += tail
    return out, hascased

_CODEBITS = _sre.CODESIZE * 8
MAXCODE = (1 << _CODEBITS) - 1
_BITS_TRANS = b'0' + b'1' * 255

def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int):
    s = bits.translate(_BITS_TRANS)[::-1]
    return [_int(s[i - _CODEBITS: i], 2)
            for i in range(len(s), 0, -_CODEBITS)]

def _bytes_to_codes(b):
    # Convert block indices to word array
    a = memoryview(b).cast('I')
    assert a.itemsize == _sre.CODESIZE
    assert len(a) * a.itemsize == len(b)
    return a.tolist()

def _simple(p):
    # check if this subpattern is a "simple" operator
    if len(p) != 1:
        return False
    op, av = p[0]
    if op is SUBPATTERN:
        return av[0] is None and _simple(av[-1])
    return op in _UNIT_CODES

def _generate_overlap_table(prefix):
    """
    Generate an overlap table for the following prefix.
    An overlap table is a table of the same size as the prefix which
    informs about the potential self-overlap for each index in the prefix:
    - if overlap[i] == 0, prefix[i:] can't overlap prefix[0:...]
    - if overlap[i] == k with 0 < k <= i, prefix[i-k+1:i+1] overlaps with
      prefix[0:k]
    """
    table = [0] * len(prefix)
    for i in range(1, len(prefix)):
        idx = table[i - 1]
        while prefix[i] != prefix[idx]:
            if idx == 0:
                table[i] = 0
                break
            idx = table[idx - 1]
        else:
            table[i] = idx + 1
    return table

def _get_iscased(flags):
    if not flags & SRE_FLAG_IGNORECASE:
        return None
    elif flags & SRE_FLAG_UNICODE:
        return _sre.unicode_iscased
    else:
        return _sre.ascii_iscased

def _get_literal_prefix(pattern, flags):
    # look for literal prefix
    prefix = []
    prefixappend = prefix.append
    prefix_skip = None
    iscased = _get_iscased(flags)
    for op, av in pattern.data:
        if op is LITERAL:
            if iscased and iscased(av):
                break
            prefixappend(av)
        elif op is SUBPATTERN:
            group, add_flags, del_flags, p = av
            flags1 = _combine_flags(flags, add_flags, del_flags)
            if flags1 & SRE_FLAG_IGNORECASE and flags1 & SRE_FLAG_LOCALE:
                break
            prefix1, prefix_skip1, got_all = _get_literal_prefix(p, flags1)
            if prefix_skip is None:
                if group is not None:
                    prefix_skip = len(prefix)
                elif prefix_skip1 is not None:
                    prefix_skip = len(prefix) + prefix_skip1
            prefix.extend(prefix1)
            if not got_all:
                break
        else:
            break
    else:
        return prefix, prefix_skip, True
    return prefix, prefix_skip, False

def _get_charset_prefix(pattern, flags):
    while True:
        if not pattern.data:
            return None
        op, av = pattern.data[0]
        if op is not SUBPATTERN:
            break
        group, add_flags, del_flags, pattern = av
        flags = _combine_flags(flags, add_flags, del_flags)
        if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE:
            return None

    iscased = _get_iscased(flags)
    if op is LITERAL:
        if iscased and iscased(av):
            return None
        return [(op, av)]
    elif op is BRANCH:
        charset = []
        charsetappend = charset.append
        for p in av[1]:
            if not p:
                return None
            op, av = p[0]
            if op is LITERAL and not (iscased and iscased(av)):
                charsetappend((op, av))
            else:
                return None
        return charset
    elif op is IN:
        charset = av
        if iscased:
            for op, av in charset:
                if op is LITERAL:
                    if iscased(av):
                        return None
                elif op is RANGE:
                    if av[1] > 0xffff:
                        return None
                    if any(map(iscased, range(av[0], av[1]+1))):
                        return None
        return charset
    return None

def _compile_info(code, pattern, flags):
    # internal: compile an info block.  in the current version,
    # this contains min/max pattern width, and an optional literal
    # prefix or a character map
    lo, hi = pattern.getwidth()
    if hi > MAXCODE:
        hi = MAXCODE
    if lo == 0:
        code.extend([INFO, 4, 0, lo, hi])
        return
    # look for a literal prefix
    prefix = []
    prefix_skip = 0
    charset = [] # not used
    if not (flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE):
        # look for literal prefix
        prefix, prefix_skip, got_all = _get_literal_prefix(pattern, flags)
        # if no prefix, look for charset prefix
        if not prefix:
            charset = _get_charset_prefix(pattern, flags)
##     if prefix:
##         print("*** PREFIX", prefix, prefix_skip)
##     if charset:
##         print("*** CHARSET", charset)
    # add an info block
    emit = code.append
    emit(INFO)
    skip = len(code); emit(0)
    # literal flag
    mask = 0
    if prefix:
        mask = SRE_INFO_PREFIX
        if prefix_skip is None and got_all:
            mask = mask | SRE_INFO_LITERAL
    elif charset:
        mask = mask | SRE_INFO_CHARSET
    emit(mask)
    # pattern length
    if lo < MAXCODE:
        emit(lo)
    else:
        emit(MAXCODE)
        prefix = prefix[:MAXCODE]
    emit(min(hi, MAXCODE))
    # add literal prefix
    if prefix:
        emit(len(prefix)) # length
        if prefix_skip is None:
            prefix_skip = len(prefix)
        emit(prefix_skip) # skip
        code.extend(prefix)
        # generate overlap table
        code.extend(_generate_overlap_table(prefix))
    elif charset:
        charset, hascased = _optimize_charset(charset)
        assert not hascased
        _compile_charset(charset, flags, code)
    code[skip] = len(code) - skip

def isstring(obj):
    return isinstance(obj, (str, bytes))

def _code(p, flags):

    flags = p.state.flags | flags
    code = []

    # compile info block
    _compile_info(code, p, flags)

    # compile the pattern
    _compile(code, p.data, flags)

    code.append(SUCCESS)

    return code

def _hex_code(code):
    return '[%s]' % ', '.join('%#0*x' % (_sre.CODESIZE*2+2, x) for x in code)

def dis(code):
    import sys

    labels = set()
    level = 0
    offset_width = len(str(len(code) - 1))

    def dis_(start, end):
        def print_(*args, to=None):
            if to is not None:
                labels.add(to)
                args += ('(to %d)' % (to,),)
            print('%*d%s ' % (offset_width, start,
                              ':' if start in labels else '.'),
                  end='  '*(level-1))
            print(*args)

        def print_2(*args):
            print(end=' '*(offset_width + 2*level))
            print(*args)

        nonlocal level
        level += 1
        i = start
        while i < end:
            start = i
            op = code[i]
            i += 1
            op = OPCODES[op]
            if op in (SUCCESS, FAILURE, ANY, ANY_ALL,
                      MAX_UNTIL, MIN_UNTIL, NEGATE):
                print_(op)
            elif op in (LITERAL, NOT_LITERAL,
                        LITERAL_IGNORE, NOT_LITERAL_IGNORE,
                        LITERAL_UNI_IGNORE, NOT_LITERAL_UNI_IGNORE,
                        LITERAL_LOC_IGNORE, NOT_LITERAL_LOC_IGNORE):
                arg = code[i]
                i += 1
                print_(op, '%#02x (%r)' % (arg, chr(arg)))
            elif op is AT:
                arg = code[i]
                i += 1
                arg = str(ATCODES[arg])
                assert arg[:3] == 'AT_'
                print_(op, arg[3:])
            elif op is CATEGORY:
                arg = code[i]
                i += 1
                arg = str(CHCODES[arg])
                assert arg[:9] == 'CATEGORY_'
                print_(op, arg[9:])
            elif op in (IN, IN_IGNORE, IN_UNI_IGNORE, IN_LOC_IGNORE):
                skip = code[i]
                print_(op, skip, to=i+skip)
                dis_(i+1, i+skip)
                i += skip
            elif op in (RANGE, RANGE_UNI_IGNORE):
                lo, hi = code[i: i+2]
                i += 2
                print_(op, '%#02x %#02x (%r-%r)' % (lo, hi, chr(lo), chr(hi)))
            elif op is CHARSET:
                print_(op, _hex_code(code[i: i + 256//_CODEBITS]))
                i += 256//_CODEBITS
            elif op is BIGCHARSET:
                arg = code[i]
                i += 1
                mapping = list(b''.join(x.to_bytes(_sre.CODESIZE, sys.byteorder)
                                        for x in code[i: i + 256//_sre.CODESIZE]))
                print_(op, arg, mapping)
                i += 256//_sre.CODESIZE
                level += 1
                for j in range(arg):
                    print_2(_hex_code(code[i: i + 256//_CODEBITS]))
                    i += 256//_CODEBITS
                level -= 1
            elif op in (MARK, GROUPREF, GROUPREF_IGNORE, GROUPREF_UNI_IGNORE,
                        GROUPREF_LOC_IGNORE):
                arg = code[i]
                i += 1
                print_(op, arg)
            elif op is JUMP:
                skip = code[i]
                print_(op, skip, to=i+skip)
                i += 1
            elif op is BRANCH:
                skip = code[i]
                print_(op, skip, to=i+skip)
                while skip:
                    dis_(i+1, i+skip)
                    i += skip
                    start = i
                    skip = code[i]
                    if skip:
                        print_('branch', skip, to=i+skip)
                    else:
                        print_(FAILURE)
                i += 1
            elif op in (REPEAT, REPEAT_ONE, MIN_REPEAT_ONE):
                skip, min, max = code[i: i+3]
                if max == MAXREPEAT:
                    max = 'MAXREPEAT'
                print_(op, skip, min, max, to=i+skip)
                dis_(i+3, i+skip)
                i += skip
            elif op is GROUPREF_EXISTS:
                arg, skip = code[i: i+2]
                print_(op, arg, skip, to=i+skip)
                i += 2
            elif op in (ASSERT, ASSERT_NOT):
                skip, arg = code[i: i+2]
                print_(op, skip, arg, to=i+skip)
                dis_(i+2, i+skip)
                i += skip
            elif op is INFO:
                skip, flags, min, max = code[i: i+4]
                if max == MAXREPEAT:
                    max = 'MAXREPEAT'
                print_(op, skip, bin(flags), min, max, to=i+skip)
                start = i+4
                if flags & SRE_INFO_PREFIX:
                    prefix_len, prefix_skip = code[i+4: i+6]
                    print_2('  prefix_skip', prefix_skip)
                    start = i + 6
                    prefix = code[start: start+prefix_len]
                    print_2('  prefix',
                            '[%s]' % ', '.join('%#02x' % x for x in prefix),
                            '(%r)' % ''.join(map(chr, prefix)))
                    start += prefix_len
                    print_2('  overlap', code[start: start+prefix_len])
                    start += prefix_len
                if flags & SRE_INFO_CHARSET:
                    level += 1
                    print_2('in')
                    dis_(start, i+skip)
                    level -= 1
                i += skip
            else:
                raise ValueError(op)

        level -= 1

    dis_(0, len(code))

def compile(p, flags=0):
    # internal: convert pattern list to internal format

    if isstring(p):
        pattern = p
        p = sre_parse.parse(p, flags)
    else:
        pattern = None

    code = _code(p, flags)

    if flags & SRE_FLAG_DEBUG:
        print()
        dis(code)

    # map in either direction
    groupindex = p.state.groupdict
    indexgroup = [None] * p.state.groups
    for k, i in groupindex.items():
        indexgroup[i] = k

    return _sre.compile(
        pattern, flags | p.state.flags, code,
        p.state.groups-1,
        groupindex, tuple(indexgroup)
        )
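The file shown above is the stock regular-expression compiler from the Python 3.9 standard library. As a quick illustration of what it produces (not part of the original page), the sketch below parses an arbitrary example pattern with sre_parse, runs it through the module's internal _code() helper and the dis() disassembler defined above, and then calls the public compile() entry point; it assumes Python 3.9, where sre_compile and sre_parse are still importable under those names.

# Hypothetical usage sketch (assumes Python 3.9): inspect the opcode
# program that sre_compile emits for a simple pattern.
import sre_compile
import sre_parse

PATTERN = "ab+c"                          # arbitrary example pattern

parsed = sre_parse.parse(PATTERN, 0)      # parse tree consumed by _code()
opcodes = sre_compile._code(parsed, 0)    # internal helper: raw opcode list
sre_compile.dis(opcodes)                  # disassembly, as printed by re.DEBUG

# Public entry point: returns the same kind of pattern object as re.compile().
compiled = sre_compile.compile(PATTERN, 0)
print(compiled.match("abbbc"))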