From eefb760a5620a5ee753c23b25800561e46333c0f Mon Sep 17 00:00:00 2001 From: P-DR0ZD Date: Tue, 25 Oct 2022 19:00:43 -0400 Subject: [PATCH] Added Markdown Formatter and Full Markdown Support Removed Old Markdown Formatter and Unused Functions Updated README Added Tags From Issue #15 --- .gitignore | 4 +- README.md | 6 +- SSG/markdown/__init__.py | 28 + SSG/markdown/__main__.py | 151 +++ SSG/markdown/__meta__.py | 49 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 1127 bytes .../__pycache__/__meta__.cpython-310.pyc | Bin 0 -> 1486 bytes .../__pycache__/blockparser.cpython-310.pyc | Bin 0 -> 4801 bytes .../blockprocessors.cpython-310.pyc | Bin 0 -> 17679 bytes SSG/markdown/__pycache__/core.cpython-310.pyc | Bin 0 -> 12095 bytes .../__pycache__/htmlparser.cpython-310.pyc | Bin 0 -> 9048 bytes .../inlinepatterns.cpython-310.pyc | Bin 0 -> 25055 bytes .../postprocessors.cpython-310.pyc | Bin 0 -> 4968 bytes .../__pycache__/preprocessors.cpython-310.pyc | Bin 0 -> 3291 bytes .../__pycache__/serializers.cpython-310.pyc | Bin 0 -> 3161 bytes .../treeprocessors.cpython-310.pyc | Bin 0 -> 12686 bytes SSG/markdown/__pycache__/util.cpython-310.pyc | Bin 0 -> 11595 bytes SSG/markdown/blockparser.py | 119 +++ SSG/markdown/blockprocessors.py | 623 ++++++++++++ SSG/markdown/core.py | 407 ++++++++ SSG/markdown/extensions/__init__.py | 86 ++ .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 3365 bytes SSG/markdown/extensions/abbr.py | 99 ++ SSG/markdown/extensions/admonition.py | 170 ++++ SSG/markdown/extensions/attr_list.py | 166 ++++ SSG/markdown/extensions/codehilite.py | 330 +++++++ SSG/markdown/extensions/def_list.py | 111 +++ SSG/markdown/extensions/extra.py | 58 ++ SSG/markdown/extensions/fenced_code.py | 174 ++++ SSG/markdown/extensions/footnotes.py | 411 ++++++++ SSG/markdown/extensions/legacy_attrs.py | 67 ++ SSG/markdown/extensions/legacy_em.py | 49 + SSG/markdown/extensions/md_in_html.py | 364 +++++++ SSG/markdown/extensions/meta.py | 79 ++ 
SSG/markdown/extensions/nl2br.py | 33 + SSG/markdown/extensions/sane_lists.py | 54 ++ SSG/markdown/extensions/smarty.py | 257 +++++ SSG/markdown/extensions/tables.py | 236 +++++ SSG/markdown/extensions/toc.py | 384 ++++++++ SSG/markdown/extensions/wikilinks.py | 87 ++ SSG/markdown/htmlparser.py | 323 +++++++ SSG/markdown/inlinepatterns.py | 886 ++++++++++++++++++ SSG/markdown/postprocessors.py | 137 +++ SSG/markdown/preprocessors.py | 82 ++ SSG/markdown/serializers.py | 189 ++++ SSG/markdown/test_tools.py | 220 +++++ SSG/markdown/treeprocessors.py | 458 +++++++++ SSG/markdown/util.py | 358 +++++++ SSG/ssg.py | 6 +- SSG/utils/input.py | 73 +- 50 files changed, 7262 insertions(+), 72 deletions(-) create mode 100644 SSG/markdown/__init__.py create mode 100644 SSG/markdown/__main__.py create mode 100644 SSG/markdown/__meta__.py create mode 100644 SSG/markdown/__pycache__/__init__.cpython-310.pyc create mode 100644 SSG/markdown/__pycache__/__meta__.cpython-310.pyc create mode 100644 SSG/markdown/__pycache__/blockparser.cpython-310.pyc create mode 100644 SSG/markdown/__pycache__/blockprocessors.cpython-310.pyc create mode 100644 SSG/markdown/__pycache__/core.cpython-310.pyc create mode 100644 SSG/markdown/__pycache__/htmlparser.cpython-310.pyc create mode 100644 SSG/markdown/__pycache__/inlinepatterns.cpython-310.pyc create mode 100644 SSG/markdown/__pycache__/postprocessors.cpython-310.pyc create mode 100644 SSG/markdown/__pycache__/preprocessors.cpython-310.pyc create mode 100644 SSG/markdown/__pycache__/serializers.cpython-310.pyc create mode 100644 SSG/markdown/__pycache__/treeprocessors.cpython-310.pyc create mode 100644 SSG/markdown/__pycache__/util.cpython-310.pyc create mode 100644 SSG/markdown/blockparser.py create mode 100644 SSG/markdown/blockprocessors.py create mode 100644 SSG/markdown/core.py create mode 100644 SSG/markdown/extensions/__init__.py create mode 100644 SSG/markdown/extensions/__pycache__/__init__.cpython-310.pyc create mode 100644 
SSG/markdown/extensions/abbr.py create mode 100644 SSG/markdown/extensions/admonition.py create mode 100644 SSG/markdown/extensions/attr_list.py create mode 100644 SSG/markdown/extensions/codehilite.py create mode 100644 SSG/markdown/extensions/def_list.py create mode 100644 SSG/markdown/extensions/extra.py create mode 100644 SSG/markdown/extensions/fenced_code.py create mode 100644 SSG/markdown/extensions/footnotes.py create mode 100644 SSG/markdown/extensions/legacy_attrs.py create mode 100644 SSG/markdown/extensions/legacy_em.py create mode 100644 SSG/markdown/extensions/md_in_html.py create mode 100644 SSG/markdown/extensions/meta.py create mode 100644 SSG/markdown/extensions/nl2br.py create mode 100644 SSG/markdown/extensions/sane_lists.py create mode 100644 SSG/markdown/extensions/smarty.py create mode 100644 SSG/markdown/extensions/tables.py create mode 100644 SSG/markdown/extensions/toc.py create mode 100644 SSG/markdown/extensions/wikilinks.py create mode 100644 SSG/markdown/htmlparser.py create mode 100644 SSG/markdown/inlinepatterns.py create mode 100644 SSG/markdown/postprocessors.py create mode 100644 SSG/markdown/preprocessors.py create mode 100644 SSG/markdown/serializers.py create mode 100644 SSG/markdown/test_tools.py create mode 100644 SSG/markdown/treeprocessors.py create mode 100644 SSG/markdown/util.py diff --git a/.gitignore b/.gitignore index bb6f27a..b0ec25b 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,6 @@ test.txt SSG/utils/__pycache__ SSG/Sherlock-Holmes-Selected-Stories SSG/dist -config.json \ No newline at end of file +config.json +SSG/markdown/__pycache__ +SSG/markdown/extensions/__pycache__ \ No newline at end of file diff --git a/README.md b/README.md index 852e027..8e03526 100644 --- a/README.md +++ b/README.md @@ -47,5 +47,9 @@ Config files should be in legal JSON format with a similar format like ``` # New features - * Added markdown language support for # heading 1 * Added JSON config file support + * Complete Markdown 
Support with the Help of [Python-Markdown/markdown](https://github.com/Python-Markdown/markdown) + + # Planned Features + [] Choosing Output Directory + [] CSS support diff --git a/SSG/markdown/__init__.py b/SSG/markdown/__init__.py new file mode 100644 index 0000000..d88b1e9 --- /dev/null +++ b/SSG/markdown/__init__.py @@ -0,0 +1,28 @@ +""" +Python Markdown + +A Python implementation of John Gruber's Markdown. + +Documentation: https://python-markdown.github.io/ +GitHub: https://github.com/Python-Markdown/markdown/ +PyPI: https://pypi.org/project/Markdown/ + +Started by Manfred Stienstra (http://www.dwerg.net/). +Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +Currently maintained by Waylan Limberg (https://github.com/waylan), +Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +License: BSD (see LICENSE.md for details). +""" + +from .core import Markdown, markdown, markdownFromFile +from .__meta__ import __version__, __version_info__ # noqa + +# For backward compatibility as some extensions expect it... +from .extensions import Extension # noqa + +__all__ = ['Markdown', 'markdown', 'markdownFromFile'] diff --git a/SSG/markdown/__main__.py b/SSG/markdown/__main__.py new file mode 100644 index 0000000..0184008 --- /dev/null +++ b/SSG/markdown/__main__.py @@ -0,0 +1,151 @@ +""" +Python Markdown + +A Python implementation of John Gruber's Markdown. + +Documentation: https://python-markdown.github.io/ +GitHub: https://github.com/Python-Markdown/markdown/ +PyPI: https://pypi.org/project/Markdown/ + +Started by Manfred Stienstra (http://www.dwerg.net/). +Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). 
+Currently maintained by Waylan Limberg (https://github.com/waylan), +Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +License: BSD (see LICENSE.md for details). +""" + +import sys +import optparse +import codecs +import warnings +import markdown +try: + # We use `unsafe_load` because users may need to pass in actual Python + # objects. As this is only available from the CLI, the user has much + # worse problems if an attacker can use this as an attach vector. + from yaml import unsafe_load as yaml_load +except ImportError: # pragma: no cover + try: + # Fall back to PyYAML <5.1 + from yaml import load as yaml_load + except ImportError: + # Fall back to JSON + from json import load as yaml_load + +import logging +from logging import DEBUG, WARNING, CRITICAL + +logger = logging.getLogger('MARKDOWN') + + +def parse_options(args=None, values=None): + """ + Define and parse `optparse` options for command-line usage. + """ + usage = """%prog [options] [INPUTFILE] + (STDIN is assumed if no INPUTFILE is given)""" + desc = "A Python implementation of John Gruber's Markdown. " \ + "https://Python-Markdown.github.io/" + ver = "%%prog %s" % markdown.__version__ + + parser = optparse.OptionParser(usage=usage, description=desc, version=ver) + parser.add_option("-f", "--file", dest="filename", default=None, + help="Write output to OUTPUT_FILE. 
Defaults to STDOUT.", + metavar="OUTPUT_FILE") + parser.add_option("-e", "--encoding", dest="encoding", + help="Encoding for input and output files.",) + parser.add_option("-o", "--output_format", dest="output_format", + default='xhtml', metavar="OUTPUT_FORMAT", + help="Use output format 'xhtml' (default) or 'html'.") + parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol", + action='store_false', default=True, + help="Observe number of first item of ordered lists.") + parser.add_option("-x", "--extension", action="append", dest="extensions", + help="Load extension EXTENSION.", metavar="EXTENSION") + parser.add_option("-c", "--extension_configs", + dest="configfile", default=None, + help="Read extension configurations from CONFIG_FILE. " + "CONFIG_FILE must be of JSON or YAML format. YAML " + "format requires that a python YAML library be " + "installed. The parsed JSON or YAML must result in a " + "python dictionary which would be accepted by the " + "'extension_configs' keyword on the markdown.Markdown " + "class. 
The extensions must also be loaded with the " + "`--extension` option.", + metavar="CONFIG_FILE") + parser.add_option("-q", "--quiet", default=CRITICAL, + action="store_const", const=CRITICAL+10, dest="verbose", + help="Suppress all warnings.") + parser.add_option("-v", "--verbose", + action="store_const", const=WARNING, dest="verbose", + help="Print all warnings.") + parser.add_option("--noisy", + action="store_const", const=DEBUG, dest="verbose", + help="Print debug messages.") + + (options, args) = parser.parse_args(args, values) + + if len(args) == 0: + input_file = None + else: + input_file = args[0] + + if not options.extensions: + options.extensions = [] + + extension_configs = {} + if options.configfile: + with codecs.open( + options.configfile, mode="r", encoding=options.encoding + ) as fp: + try: + extension_configs = yaml_load(fp) + except Exception as e: + message = "Failed parsing extension config file: %s" % \ + options.configfile + e.args = (message,) + e.args[1:] + raise + + opts = { + 'input': input_file, + 'output': options.filename, + 'extensions': options.extensions, + 'extension_configs': extension_configs, + 'encoding': options.encoding, + 'output_format': options.output_format, + 'lazy_ol': options.lazy_ol + } + + return opts, options.verbose + + +def run(): # pragma: no cover + """Run Markdown from the command line.""" + + # Parse options and adjust logging level if necessary + options, logging_level = parse_options() + if not options: + sys.exit(2) + logger.setLevel(logging_level) + console_handler = logging.StreamHandler() + logger.addHandler(console_handler) + if logging_level <= WARNING: + # Ensure deprecation warnings get displayed + warnings.filterwarnings('default') + logging.captureWarnings(True) + warn_logger = logging.getLogger('py.warnings') + warn_logger.addHandler(console_handler) + + # Run + markdown.markdownFromFile(**options) + + +if __name__ == '__main__': # pragma: no cover + # Support running module as a commandline 
command. + # `python -m markdown [options] [args]`. + run() diff --git a/SSG/markdown/__meta__.py b/SSG/markdown/__meta__.py new file mode 100644 index 0000000..ccabee5 --- /dev/null +++ b/SSG/markdown/__meta__.py @@ -0,0 +1,49 @@ +""" +Python Markdown + +A Python implementation of John Gruber's Markdown. + +Documentation: https://python-markdown.github.io/ +GitHub: https://github.com/Python-Markdown/markdown/ +PyPI: https://pypi.org/project/Markdown/ + +Started by Manfred Stienstra (http://www.dwerg.net/). +Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +Currently maintained by Waylan Limberg (https://github.com/waylan), +Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +License: BSD (see LICENSE.md for details). +""" + +# __version_info__ format: +# (major, minor, patch, dev/alpha/beta/rc/final, #) +# (1, 1, 2, 'dev', 0) => "1.1.2.dev0" +# (1, 1, 2, 'alpha', 1) => "1.1.2a1" +# (1, 2, 0, 'beta', 2) => "1.2b2" +# (1, 2, 0, 'rc', 4) => "1.2rc4" +# (1, 2, 0, 'final', 0) => "1.2" +__version_info__ = (3, 4, 1, 'final', 0) + + +def _get_version(version_info): + " Returns a PEP 440-compliant version number from version_info. 
" + assert len(version_info) == 5 + assert version_info[3] in ('dev', 'alpha', 'beta', 'rc', 'final') + + parts = 2 if version_info[2] == 0 else 3 + v = '.'.join(map(str, version_info[:parts])) + + if version_info[3] == 'dev': + v += '.dev' + str(version_info[4]) + elif version_info[3] != 'final': + mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'} + v += mapping[version_info[3]] + str(version_info[4]) + + return v + + +__version__ = _get_version(__version_info__) diff --git a/SSG/markdown/__pycache__/__init__.cpython-310.pyc b/SSG/markdown/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2a8a7430958593a25ed617978da3eaf8a256c1b GIT binary patch literal 1127 zcmZuxUvJYe5Kr2!?Xpc861@A3MOu??f(;dffbI`N+cYUd6Hz70P29#Tjw9Py2;YG( z!5d#GuRQS;c;eDF>o#U7C!c-yyWidUA~rTG1J;k%SHY8(Vf?O?yQ=}pJNQ&3Lc=#g zGi<~rZ}<(~^qaipx6JCS8L#lR-{u{^Gt*mfm#_M(rZF|b#!u6?!WGZ3+oS|Q>!Q$+ zNYELP*P+N0%X*I%M~tU2mdngcq=1hfxSA+m*m`Mqhiwne;n1FFbikKwmlyRuxbdFI~ z>Q4Ik%*5Wd4mi`YKwdXA_Ca4gv98EN(E2QwyaYYKojWzeee#@mZB7$o(vDqy+eo3dlph? 
zV5}g&KPF6x-7LFxB4xLJN#$m}JCcGQv6$M8at-4}9E{7&rNWYlz}Pm+)x&QZf&-Cr zd#&tX9Et#AzZD2cWi^|!SqYe2!|!6ufeFTPr8;YaC@ot1E7_?I0D{CZ#=i`?R#{cz z?{GL6eTELHQObmtqk6Q|Xg)HdG?e0d*i&jc^1S0woyiDemN1R6lNM$74Hsb+)3;R( RRP|SB8Bd>Wn(NK=#y|62WYquw literal 0 HcmV?d00001 diff --git a/SSG/markdown/__pycache__/__meta__.cpython-310.pyc b/SSG/markdown/__pycache__/__meta__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33b107d041055b85e2137e5b18eca6b8e23bba28 GIT binary patch literal 1486 zcmZuxO>f&a7#1bDNmXadfMQL69C%v{)Up$#O_3J+a)K;H5(IUJVu1mH7HOM}Bnl*L zLxFQyw&Soru)}uS{t~Y{r)#N!*}4jyTptDROWFS(oPoI4zHJsS5z zE?U7p*WZo%x6t`eEYjBX^vz09t4eQy){}$Vl)T7!DCDq}OYxq?x^2SQ?k2W*N9Mx?StKa3^E zZ^_w6vw}@NV1WtPlq)JyD+ri6V=2M8q`>JdCvdzYMM5%s$W!oec+Fv*y0iRg328hH zc2llpfx9CTkFsjQkH7#a$g}MRCK<&CijWu|jTO6>ctB#7Fr{Ezg2bRB@k}n=E*m-PM7E>^wZzG3vcGmEFW;KI``!@me0S!Z(;p+0$i2@7Ge-Fs4wmF)bQjYeB?tL$`0Rn3CoQCUhvF#DanloSFz!n`h0IoIi{Aw zwN(a9%mQF?o(=zUQw>>{Tz0>RrDiy_`yMPMb;%d)@ASuX;3k zQIx_$qA1J@Q;&vBNAnpm(5lZNEW69-vAcrm?qlTF>i^c!8uDGa3PZ=Ba3PAQh@;3X rM^$PmHi}A!d34(ym0ItF72q(7uTnwB3H#EztgJD<_{(+6`a5-IFBhFhxbqD(p|xj)cId=*;y9LVC5=7Dw!%i}#%|KwYueVP6*j}U zudQ&7&%JHzwa{;cZS>o3oxOJG>{y+7zjQ3idofl+k+SE$JPO4q^}NTddKD#E%oCm} zUqu)p4%x58FlC#v=yUn=d^)A;c^e`qrXwG+p;B4?aBVHqvhO67?Cu~^!=m4f#G1Dm zsjrIu+0g1!Ad)q+{GD2(wR-v*bbYaPHYFQpQCG;pS|-IS9;mhH4A0w9zEnJ9{V~)` z4<#OVRK(LxGoCTs&sUnM}xd>5pSSWzV7nHXIlkWT~0$kMxkv zs<)9uN{-pi&<}=bz2Jqx7&7)hd(dHi8nUh2_XGC4$oZ9m5B-40JkRl6LPc*~WMdf( zhKk+4d-uVe`*-jCg6$4@?f%-D?1eEeTRHBsd))_msn}N#a$dxzt0d?%-F|+_T9|yd zd;iY8?&tlB^R9A{f+7OiMX4XNV=i+*u;Y2pq5w|h580PH8*C-#oITrGf3m&vq?;I@ zg&YoybL{Uu0>#>XZ=>zlrCX>d4(QeFg#a*7wSot3?b(PdC$#q(p%XT~w)Wi64V(By zEQKxn&V_A?oU#dgDenIX^VzQSgCj=z2FB^yKq=1&e>sh#(4p@cfW##%&v5Myqnwda z@E{9fKhGJx$T3L?2AEbt&q??)7QxXAErwRng7PBEgj6gkVih4`S#HJHz;v$TnmvB8#YWQ4GA!szQB=}=7Du`2vL^r?Q&GZchTa6U%eh*`PJDIt ziUg!#r8M@4PPJXF+ zdHUrrf>-=F))NvAz72B{Wo*qvpV$jdz@TG**YC&JlSn}xv}H8}xo8zbeyF*r&toxy zBCu1RYsge91%&7NqUQ$gb=>(iG$+%=~_ktuAT*qPj5JDd=ag5~r+x@;SvGG~1H 
zjZ?Zgj}ObH{`QR}=ON^uPuCyr{}wRL_p?YSxnEO%zHcafKMSRpgm?0Mu)njjxnHyQ zzBYdq0@=81koQS7(({@#ncpFlb@4W!kgYFzrV)Ay}OJ{JVJ6+AKPg1*46Af61y-eZGwu3L3q&rFR02 zmSNu`g#r=RknM*)K7S?S9-l(BEll3=fz&{b>YX=msYPAilEn% z3$)Y{HK;yj4Ma|tWF<)@$aiq()N5O=-Tt`kF1q@CfLG@(ve6^lIa&38W7cpU4BHAl zU~{2dn4!AK-x1B?bsoE>{hW36X(FDHcUB05J}EqG>*m9MuuNd_W6g`VZshd$5hN z-r1+XP|s0~xAg0t*l3%f1v}W=ojKXSsIvVVJ^(Sxaru!6q^YXf{<2MjoFwcx2H^zF zi!cWuqBK&yo(?9$j%T-R&z{_zF?P2ebV+TRpl6axN$nHsmGi1`O6s}@%O?76Xuq2y zk~u{|iZLyEcdF^ee~wtL9O}Ct_MDFbyRVRkHLwLsG_PO2_zWsB0?7H~2B)ySi|? z3`3G4xhf<|%9+jR+@BA1($G8)8V`^luowz$KnV&&NC&lTRvB$5i#X6ZBc`;yPP=S! z1>BddYIRATFIyDV${&<2ZL0LN^6SGQJu2N-B1+4J8D%TXlud2ai>O)Bb2lnCeFf>7 zcCIhnv={Bk@+@@DjSL94^+nH9{M<&zB%>4OogG$aF{J7LbrvN7X-p(Jg^S{Ekh0(~ zUn!mSMMa?63b0eiXDnn=FM03afMy;XAk7Xcyya}bQ!ZimF@vY^R{J^-`V26wr=}j) zahIb+h)^1hBII+pEzeNEk=OdD>yK(!;*}{0YGK;TA~kuOaHxfK*=DT))g=InsSenv zswZkx;N$@9QTd_@CJlIcUMmewq9Kj9*9E|H9&cm-167#7n5qImjdJ8nO!i~c^{@ka zS&Z{Sce>8DfP(c3mk>R1Ja4<>lnWXH8WTAR2Qp5lp&6;1GwYWwIpPAy;GsA^W-#qrf-9dZEGFI19O znPzJK0*s&wNU9QXr=nIb=7*@)t0EKBQRAdSU?zYY6%op(!ia0m;VlGU0-RP?Swts= zv$)`IGJG(BQPrALDagKD;&>L1XCh8h8v%)BML&*$8quIj;RtQ@xuCW*T` y;JY)7FTaZ+@`q?j%hVSxovchLLDR2Vv21@~s>olF)pQl;S~qSj+SeO5e)J#s>0iD8 literal 0 HcmV?d00001 diff --git a/SSG/markdown/__pycache__/blockprocessors.cpython-310.pyc b/SSG/markdown/__pycache__/blockprocessors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5242291c767e741f8a702e004cedd9cfa048d81f GIT binary patch literal 17679 zcmb_@TW}m#dfs$T&jpPk2!Y^Lw_a(v#6aXwqO{VQOYtHQBDqUKTmZ7X5U9amx&aJ0 zm>He!A&Ai+7gNq|;!Etf;-umv1=y)dROND&U-FdrAyqEFy8Pm*&P!5Bs>0VOsZ=RW znD6^fch3c&oTM^9pEP#I^XotSr1}TdRO&x4(f_fKxQ@r4Fj6Tu zwUlxV*Q^^$hJ2e#rhKQjEHh=KwlYhWk#f_nRnKnamU0r$)bm>-OCv_gGxlsZyKgLw zx;Z!hWooH#E9H*3_LnKweqt^iL2A@3AXV^=JV`H&A$`OhLwZcoN0C12jw3xT>2aiA za3_$Skn{^kA9E*>p7dV8`x8hXcTXUFLduUJebPOJ^eIVCB7NFDgY+3mA4mE{_a&rX zlJp6rUv^(X`V~o^MEX_NLE4e@Dc4*`6~9yYQ!J5ve<#>zHk^AE_0Vl@H*EWc6J^!5 
zT6J&BYXp^`h8oQ^=SR(rhI31`S3PykPhKh6_RVIsoz%SSYy?5ezkKmxOWMA$6}4Sj zuLT?J)l#i_(Y{p+Znsx^RimP6bL*n+{z5#Wi}C9hG1mKcdaty0TD4MBtzT@Z=B8H- zE+%i-_Ciomf#*7_I~ZnTO(D7v)VzitsERYi@nLw|+uJ2~+f(bMh8J8cmh5|#8fIH- zp!Ql*ITdHk+je%mit^dePur^IELI+F1m2GKXrL8z;BD7@x4Fdup{1F&QdqhAj9XZe`9`OcHVyf|KG>97dL9Y(*lin%Gtm+V2Pd8ieIZrX05SK&Pf(? zwce~g^h=I&Cve=R=Qqv;PPN&nda7a90^h5zIose8r`D*04jpV5`O0_vc6Gz4_|Ek7 zYP0T6Ph)YM=~_^!*Q);Xd1tj9II4n0Q_g1F5A13ssBUl|(%pHdUh@P0yz_ax8F={i zf+`4roBaVR+}ESWJhm#X=d@cKas`bx&?%z2l4@2#Jx#^;L6Um1oL|LuYinyB3El~I zTG+TX-Eqm8eH>uFuy<^F$EmpPMdh{XphSFcH6O(TNZPhBM@(bKX?Wnmpy^cVpfqQr zxs3+2h>9lVRFtRJ-7VQqDf#D~Lei&-MrgHzS{>h`eX|SmF)378EB>Sr$>8}h9{)ch z2vXhD6Jyuxnw!R+8Kk@B6VVv^X(JWoS&|1`(U+HzybQENuU=O2pfURLa*_w#(U+H( zJZO-w{ZcqFgLw>B zyp0LnmeKbr9$%124QG$up6}He`-olir9U1_ACr!LLayvpnwXDg+m{Cx)iX}=is{hY za>MMy?TT9W!>krTU!~OduomC^Xy)?r#}G;Wa;w%1)N-t${bj9rms_rCcH9fTzrMV% zaBDecuH~UMEVXvRiB*zGSxU;fn`!jnpFxl}Cym4YEJJZit9S-J7M^Kh`$K0d%9&xjvlNP@T`q%nTJbjf5PuFnvlL{0dsSE5uGQ}7?H!ufC zP>#2%QAxz)Q8TNMqIy|0vj?Kj;)Nl>o*)Jb+2H%sA#6|+gPvoxf{99I{H~Ar z$&{jXjMltrV(a4%<9ufD_+<93(M^>RZW@~=Vy2r~NPT6@71Qc0O4aKKLOZCemg`<) zJ=pll3@vI+T}sjy`d8;zFJCU#8nvKYrWEoiBeKSr(K$ItFQvHFw+{@8!FXb*d>88A zGzKLgI^8+%%;5qNy%$l;m89Q9W*% zfee0xjm@yFA93_q1bH)WSZ3$c(D;*zb5*ie{NWpXWNEvIbirutl*)Beo2gxMJ++(e z8hbE`_KjVuYu!veczHL|&Fp~@yXItS-%zi2(|hSIRGVx3ig_A2M(Xo%)Uftb{sopn z1xg;wC37#+P4A;Md|N@bo2J^-Z>`bX$aLPvN{Py^IM_dDO*OZKUJwCrzJ3aL%=oQ(EeLI&P~RtL3bU0~%WJqO3RJBXTAR&U zL%oU0#jIXxi*1EjO*%`thPN#l8FuQ27QcqKAwxvD`V%zbpFof|vxa5ZMjnJTiLhXp zreky_hqgpsQY7qwEg{n2BU|!Ih+qR?mtq6bff1Nov?m5O;zevox@(v)qUt@CawF7b1jURju==4?t9y^Ux_X~+qc*&tcD>p3y_;Os z71Zzx2yAoQIAxqM%)en9ozp{W*{eUWmLzo|Ij-f`S-0%wtuiZF>ol0cydyAt*GKZx}&~8LHV{@00Big0)~s_{FkX+H+% z9=8+>4;T@pcwc(midlgU6AL66n(I*(a8g+NNLkTBiDZjzVyafcn$}>VK;u`$Oe!X- z4YiZj1_U{m3EH+9V+B|AHw_$&L_jlU&DDxK?_{B&D-sxf6HayLRRx@|yq3?3XIQq}1rJC1G@ zPK$?;U`TqI6VCoBtvUZKaA25`u7)`tQr8bQQ0`M8S`!@&d+*0Mp~Uhk*f#kv9`m*$ zQUqWBYh*g}?M94Nn(F_Gqd(c8D(PXCgbOvNLvyHROVt3I5*rpQMd^{y1K@NrdD)8u 
z@>H8}_xP=*rndzA0p$%LpnmpE+KPY}wjYp{LnMF$1|^GXK|MgTboFRO$I8&5&7>v1 zER@XI6^i`Kq|#Ys!a3TY9xRe1<-3g^i>8jeJd_ZXIwaBtKD2oz(7%@5{l z^rpP!@x_|Yb%zR*E9=T-QMcmey0S1&UBA-WJ!P*B!I1KS1GaMk4SnXa(MjL5Co|wsZFD6{00PjU;@{| z&fBz3z^H(gm!XwcH0L@;KAW2Q)Oqm5S%0_qMzNR$lyA3y*0^P%sk|^pbWp2%+6EZ8 zGk0@#Zm~Q+8yiy=ogrTt;YhhGHa)gm_ENTyt^7U*M7UTmP8gk+=#Lq40rl*`cn?uK z#W-J^!Q<1-)inv-F$sgfym<~Wb2GhX(Vi-#kTy0m$jd%Twax`O+K$X~)2DG5)lTA4 zC;y1kx8?+Ghj}p`Ry|i;L^s8eKB!F-nAQ!nRG6#zI+6}5>!E#TarRz$@u&A^7X+V& z_T4)Ri&4fbUhAW4S&Li9`{#K4F$CkVX3S>=_;T?5mz`6Gt;zuV2e7SSVZWzWN%BXK z{~q7ivvqDay>9HAMym0XI5)|;-OVIzeH4|#CXMR<0$R_Y^CgvOEQT@F)hF#Bc4ARF@oy)mmRG7;iEM%QxIcEC8K1f~&JUgez zBZ`)vY;I_@)c28q6Dtjc5RR;S0UR5&o=9ifsqs_9!Zci{>Qm+|GoVqc9dWl?Ng!Q%=CF^Uyl%>O?TB+id?s1~=GA&abTk ziL9&7m^CCEDK-BPkI#JD7>7+fW)_T7*%QV@x?oJ0rwlu7TLoCn}5?xkRl)|Q*UtOj=AE@$jqDyoNn6?hN69C=A!RJ;Q&eVh$WzL z(o=#kdFRl{T+vche1~>V)9Z~`MJ@_^t0W7=cZQc_9eIC_M++LTRp-^iCfv6P0}~(M ztbc)aKuMfBJvErJI!0FkMd&{G1!dNYsa>mKy5KdWzy*6b7rq$vs%z|KBp*JRZswt- z?t?zTWkTaw*J>N;RyVsjg178PDgSa59|hO-<*S>ZapS!d_&B?3G%_P8P`E*_j&9XI z8>ivrnRae@0gOn{Ftrh&ER8N4hye}45UUf$NGX<^Ng#h)TOC>}Xz6)rqMP7uZzAYyJ$XlyMN^dKU}5`xZ%lWut1R`9}K;PJnU zppdqpAuMyu$eLCbMOHb-fDqvCAl86ep-Dki*2lr*RwMZ1f7gQD)a zj-IEO{yQ4@nq#G?ICvm)&|u7??g*Y4JpP{{0G&g(2)~%{aWV$Y3<^$D`>HG5^d|hA z4^4FiM|^O*F^7I#Lv%+j(j8P6P!C1Aaiu7`Lpy?3_4c>!Bp(zRDiTS3O4R_1zl)N%_Cx`I@vM7~U+T zg}@JR2^Z@+9-sUfbBRZ62Rg#hls>A0{FI}roD1YG$5MHa`G|7xQs?BSI%7oV34^DQhqG7~V`P8+A0z0&y`SqrL*?)w zL0uw~fi=?mDSD@OGbjh(10c-Ax_U1wh7XLOFY^S3tm{f`s4h(YX6lJB@6Y?y6&ZJi zD$31(MG@Q0HL`)dHwuOWv+U;B&n7`!hR{-a9qZ;a7>mk|9spx>b008WGRQo}iPGo4 zj$L_>+chv4ktx4reh%Fy1dgMeOqE{Oq8XZ{H`P^C|H{yOl8|0#U!i)V<#P3GyzVDb zoi_xv4Xr!qyL1KSF8qsE5sBXmZ9y(_C|(>_!s0ZM)h+@t2~^NSDdLkCpM9T_B$;b& z78P1ya*e?Tf`iQcA&VLa!tAOCQQ;|JcWN8W+kca>gtuSf#CURVLq~#tjd9a5Uo)&{ zb{c#RRvSx?AvZdFJ2>fq!#C0xpBX&DgkTH7exNWDXC@}7#Q+VA5%qOVbsV&qCgeei zrvO%GDn9*KS-A?wmN-3EaT}l^t_lDXn!pYCrM@w)3Iu;jbpv!Ki&{rq{Q`q6=0+|H zZmz7zux$J5D0qkS=UGMp*w?}zB%B$YWem+$2xbnFVh9|}v;74IGKiiyWBYx5CWIxT 
z<~Td1F395;1t)~*!YRqGB5UBo1?9&l5{adnK6lQ55jaa@Jr-`Jt^1n@I&O? z5jk$>F+8q!M91)>;*Tg`xyKYYzVk|g6X4v2r;bNSJaM7(23MZceN+mKw-(h$hz#sE z*FKAfL-Yxc=q;cLNj~m*3taP5D)tyiE{#d3^&~edxdg%TxEY*~p>)lR-pWO0Sm)$t zS~8z?cbDBamP<&zLGdh*wDX-_>C}{z7rUQMzj5IK<+;@BI8x=Tdmb$uQl~Vneuim) zt9Wv#Jx;ytm18sRahRXEedq2?_%8(bs`IQY5U;n#)Is;S$3#_dT9{d!0_UG)!B&N5 zos)w%O#W)$#n)_RP|rl#1)lsb*nf`$o~Jy13Byqebvws~x9K(j|(Yg6_WMhN0Ol6Ij*HZ$toATn&kK$kp#E|9IRQI2Zv<$DG? z+LpJ`ey)q%hX(n$o7)^wAEPZ>mkC(QGIwuuKNYo@E007qs9Ex$sKVwEnEtp048uPi zK}s4I5*NRT^oO`OGE6qv`B;P1++=$(1icn2#>k>hiN4- zxG*N&kt>7&x<6a@hcn^VckQmdcibIC*$Fud{v*~x?=r)#tw-?rKOP>pj4vsTT9y$C zHxwdZNoSL`?+G!*6kZyUvsZF4U_04YSXcd=u`d}sWbh7x;z{ki5XYldPnTFm^DHbZ z-1*?{ow-}(#T&O41aE3+>9U5$8WwMDigLo~yau0VS=iH5t4vZ;g=w@B=I-8D`f2&2 zyRgz1Z_F<)d~#>;_ENSI0NSqh421{umKl*`W4=?ju5a@Ub}42p41I^;XyPr?A7gH302670}JbGH?cJ zSukF#eZ0Gu1)ZnLxQhs90CRztNN1n(YRHIgPtD`nM;12_Y?ryCLUDK9Jj@%(C#qr$ z_YQPB=!Y#luLhxU$8kP@cjloJovgt-fTI#o60$Avr2|r0Cqv1r1@cqs1#y@Ot$mXb zqo;U8Bb>j@Lebd2$yiU!Qg^Gr$KVGH78&$x&o+|Ly%6o``fZkTTSdn&*>~m^XK&5U zQ#gw>?uq1IL#5v#5xj&5Jp|PFV`~;z41P%ai)%n; zAVdBw4&>JvPyzHg3D~)6G3bq|=M{Jb1=l#VNknMnSf-V+fD7Om7zOqHP*imlg##Zh zFhAn?7{&%YsLt~WE44)5Kt{ca`q#51cTc!bf5bjw%hNI^OMK6sJG90-j@QwoRg?=c3BM4 zscZMIi2Lp8>tDQe{@vXoGxWt=)>&RIbwB-VdHF$c`dabzY34M#-+jK-s{RhT)wXFr zdZ1DAT+{R4U@0_uICaRq+QSV`uG|bB|Mw8|3{MEAe(`sE*zgq7(uz$>!Un~28hCMf z#%PeEN7OX~@#aumJ~03nVNOCc+7y@q3v%T`H>*BG-}KFh|DROJCm7p1ic^9#L8c)& z#mLkDO?u<3t6k_>y~v$cKJm`s+WUGF-e;b-t>P@%(PfvNv+96tCwewa<0?y^c}Kd@ zrrev1iJ%j6<%dj)M$Oin>$s~GTDTtE_!jF<{R&=L%2d{PP1@M#6Do-2b=fHljrDyqd6)zT=-(ds_J~#pnUt&V6Y2|_a!WuTM!nCX8w~yl zgC3bAWcnXTArmOXX(n%Jf;_32Ujc!3P9MIW;!Ly;mFo$Ch9mbrzPoq__;q&JF7FMZ zN2?^Df7?&1W13c(|6E%2FQK%bR$X(+@*t)>gqu0gYd)e^_`XE!1C@!`e;F@!UQFoq zHhweJ!F5{P9Gb@+2#Wr-$e4Y5ck0^vl=c;I0?V<=)u{`lU$`hQ?H0dy`}}Tqsyv*r zJUy6Gq{?`|uMrx>)88F%Y?24lcxLd3QwT!0{{ZU<1T*h;!&jrv?dBFg#P1nK7G`hE z&)g>B5)DF7cksZif!yK!r~LRI@v0Mz;mihMXT0y%7zep0hqpLX8D>BCXkg7txZ=o! 
zyNQlWxR`hX2QaN(#)Jl3nlQR>#!pj{<{>mkKGr&WBl1=j+Jn4g4{Hye`(xxOecyHr zF<32SW(cPJlCfZb;tT* z_lsQs0HJ0QXrDJ)PQy#tHT;eS7l9tLo;ad=!uK5WNV{jyzyBH{ zJYB+Hx&XQ@z(|iznnWi&EBsGLhLeZw1SwkWu(@9A5|b`SUX$F)4r?$~#WV1U!H4>% z$Oh{A4!$~P5{d-ZJ#>;Evr#%PC(?QO(9^HN)la*sw zuGhg|r5z+(LXlk5hyU*o=@IS_RNsJz6miM}F2W9oRptPGYcB=Pf#8(@WM?kCZmnsF z<;0T*Z`~Sy7m?o_*@PDl{xXVOc<(ejuuXLjeHXwEgC$w~8dJbaB-)QeqMdK>;8orb zp&ys<_nR8-jPR^M*AVz)LjsIO!W%1<63Fw7aYg+tB%Wp%WSLD^1pPLq`9MUYsJ8^R zhZ+0^1H>(YyeI2l$9tjOBNrvoUigSw;afm`Xx3c$Im^Q^17{W(r$^5TxBLKO77vDo z^%FROodl;$0x|I+=V0GY9J24iCVLn{U$Z?nAVon;4@hHG~n zPb|h?L%a{u+>d|yn4BaWkkSKmt$v?v$#|Ye*dL)w_0QPN$)xqjkAZ9D2b|d4MRGP- z$KONHN7ir!gLLLVjLGdkbpz5D$DJV6*r|O1PVKDvo|}ezMNFJ$RvD5R00?sY1ddB} z3NGWkx-9h&lX`}hzHmB^0G&svzGF{Vo>G$ON$J@B)c)CjfDw1n_E}$?ow$@D@>rnv zSriT5sQC@%CGuDkCHG7aN9-|siy1wLArkpB<{d(4^H}YO(D1V<(;mSU8eTOtvuTF( zlv@9(bK;Pl_;z_rI=Rc86nV^4@wD8q33D`ld0&kXWa;?ht$Imci;wQaE#g;sf})m2 z`*Xzco#rc}f*s`An9@Hk=GDBrVp6N$MGg6NtK9sRi#BqPK#twSzb4KdF;5k7zn~XU z7{UOGL+x9rgmuFF6@7R4Dhb-3LSUuxP99uYz#oqH(qk`-FO8SSzcW5PuEs~lUmZU= L{@VD9lP~{oD|KL_ literal 0 HcmV?d00001 diff --git a/SSG/markdown/__pycache__/core.cpython-310.pyc b/SSG/markdown/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa8d1e898639800ce5e7d456259d207e6118daa2 GIT binary patch literal 12095 zcmbta-*X$seZOBg9F8DJQM6<`Zswvyu|SKUCChfgIC5x8vQJ)i zJ8179nh20&A~#jqxHIWnr|kqx^Wum80iDjIPkriZpYE;GzWAv#eM;HH>gT(AcK~R~ zlWEDpVzIaT-QDkge|)~*y=ZpUl<@cQw{Erm`P-88@APBxPsNY#;1mBEg)7;TE4xY{ z+cN)FY=wWTw#vUXTXR)c3rZc`)@2FxQc&&~w!!OqQ0bVq$?N4{rc<@6ylw=uotjLQhm`$6-hGR-A~$4XkB;MT{qebO>@o4n*2^T z@H$?YIEjxQ(YE!6Q9HEWWW6nqJs%fC8m4(EYW0eqt5!Qny7B7DN|)1K?qswZJATsc zZ8iL8#eCCG-s)|QyJnACQD;R=znrgVB_F5B!Z=}$b%xf5yD4z`XPF6N6d1p zZEw%o_Z$`zq4#^tw{ALj+ljaD-Jha@3A{Z&cB2k02$a@)jDd5(zSSAi1dUtHe&B@G zHNOKM?g$#hsfq3H@hBbejQxEI6_qwVl~LB^RO=;vfbZj5JwI@p zUFJb*THr>+Vl*#^=B*%V-EDSp;MkpC!L@PS@gr5bPm5ioM@=4sgd(OHFDuk=kwYX7Im)pfoB`n*KWLh 
z>782}E!yeiUty-YX!RtRCy7iG%#vbjUdhv4^=}lr>}qZa=D=`uw~X(KYq%ABo3823 z;Csfcy0iGM3S(l=W@e<8nUOhn(R~Wf=iO)AlkU?W$@Ve#1Psfwtcq#-&#Xrw4lXI| zgq1|rTQ{#?Yow~{-v=i&+I3pun{UxM<$thCl8(>Y_{0xU4CE0vwFk;TYRWKp>Mv!~ z6x1|cQ+Fi%#(q$iTpcAoqw;|yOFlg@lFGMDLap1X%7KT$~?rz82#xvAppiP8@i%6@}aW4$d9wf=Z7GZyS3_ z0_zZSM^v0$JV_(!CEZ@qgs18_$*OgQf779h2Muep*X>5!4T?UH73Qr@?RUvK#ntcB zs`U;h7hCUZih2wRlh3tkT^Q>}YL3T$s=*x8=MUxEhw`0NUP|TWq5S@0WCSP?|Qb$t^U1Ulz6a- z!hf(iOaq%4*ajpZaB&>AiQ{QGT{>`E=SStG`gVk2%+`p*w$eU_&v|@a!sh}$FXQt{ zs<%BzKeP4r4vTtSw6RnpU|?%NO+8x;o%^;D1*zP%Rk$X*90Si_p0tFWW2H(ju=SP` z-gjc#XhlIr#&uI|D{}X-&?GI1M%~|L&;VNrd|MB^9WQh-3-lyaJ5JX&!YFPrzni4G z5EFcF5qh%qNNml3>)PRr(nU|Z#DxKi>UE$?`*tZ2lP0chB#=!=m4ilNg{>rQRB6Ac zGP`si9F9`yW@?Uw*gjU&bMdb$Yy})o%b7Z)vs2Q|j^W`(eJ)jZyo4>#+00(vTwi4$Jd# zQ$CpdG5O(jBK8W31itYBd0jby7v?oJ(Xv`8fxqQ#<;2Kpm4O70DfgGGHwbfaH5bsz z+6IQ)i}{F+vMKR#QNY8NBf* z8e^}anX_CJuhUQ3podRP&;?8bo9JJY`%e{Ynv{$dNkjfeOQyLvt3EC64*FUvZorJzA#Wn04@YJ5`9#5<$=0u>{fVbZYwAFlY1EN zDm|IKp3J!Fs5;pv(RM5NmpL$?uB+mau0aRm|4^`ev_1pntm;AT$%0WP-w}({bZ;e5$e0Un(@&I`#cw^!7`RZ!S9-st8`PW2$#+3{Ffa=oqI zPFiU;IRI%kQ?uC=zyWpWHf;~J3R$kpj75x`54rxjCPbPHD0>k_s$!0#68=p*2=R%( zj>4ER<;6_G6&MBOk^acg6v%s3He}^9?UC|GEBC*Yud?xZ12?4sv@^<_kXFQBgV;gx zl?U=r`BQ0|=SDs<5_zN?NS9zFhR^}_Wk|F7j5JaYq@gy@+!?@hpiHi& zUm57T)p5%$=oaRf-T!$u$4O2Xdl?$8`B1`4(AeK+#(LHHd&RRS1mG%(ZA?TZ6m>jq^h9VPVE7nvBIfA;gZfBG!3xmQw-C`sVQjUX z`<^wiZXzDjo@iqt!Vr;36na3@JtRtC@B|MLk&LYarU3k(+AzUy3^)sBP&8uw-r+~X zx<K4wvnVJC|DZCI2lB4;P`axygx5b;?jhUAtr0RL z8XXD{kkWY3R=n;fa%w=}xosPJG8)VpDEeootiRwU1bwV=8CT&3PBr>pzJd@HkI73+I~x9cqklT<0FOhs z%SR?)g4Xk=Yp_U3Y~ zMgM^2n3SQ4P4}NImjAhqkJTW)0m7hS1C(w;4QMf|X~2^+Lpdo8;7x%TpGJyGrf2tN zIo4D9HwxvxK|uy=H&Qu}FvuifY>Fb0(TIdz?|B@H;W5k!Wh`JnD1k-6+*sGyx3HkP z&c1;!UT3eOmX;7YJ4`UF+;(D;HP)g&oIeGV_>ft5JAr%g2Ew!K2(075Jae#`zzL_CoHfC z?I=A12*$HCWL2}|QJ(*^Z1voQm-{bVMdTj%ZkA)oQZ+!3WN0h|eHQs9088M{L-|x+ zJ9XAN#SPa+eI_jh5iTiG9oaQRtLz+Dlh&pp%Q!U=IQp2PkB$b=dS(#F8m8J^Zy%-w znQqi~k(FcL1BrS`=>G=l><$Whmaj3(C&kR62S<){6>a~5Pi&!>hqBJgTBY`z8XYSX 
zd>%j&O004v{_hDbz!6W!!czx6rN*Z~8R${wanl^+9H?=G`!qWGUF|*&n%`cj{TJ5T zUhA&qBaZ~eBU8nbp%%{rSUeBFv(5AbOkKHx4PORrc7+O`id`xwScd+S#Y$E5J+kGr z=Eu#6+(L?eF-4oEvM584PZoka-EV4xGt?g+F42Ds$3z+C1JH2+cl&w*iv=l_0bdsZ zEtP#MlVW3_Xpmx8c@?2}Qw3aAb6ErhfMc!?2pxBI7>Ei1>rolMN}~PV66yvdowtmT z{|3~Sc4yFcc32ve&=(Yj`T+L<2rRD;^r3d|I|x;)yqze`vQhslgYu|Gnf2W{g4xMD z)gF-U=qCy2mPf~2>dT)FD}(aD7;unHdmNUrt25Zu!u~I^T|I{pcrrTfB2yqnv7fo} zupCyB1sCJ#=?TgvC_h$iN$+R74oefemv$Gii}KJMnAk-byC~oLJ24Vg-*o;1GF4ba zWLEh_L?=sl5GpS8#my3)h&}n`#262LanKv{gypKW$-lYj3ALI8rGV%1t``g6LD%PH9}!&xC8Nr3iu(? z!AETP`G)ldCC1+6`K&W_O0>ijC8K{EcYn)5QD9kmEd&*~Ys+(IJkD)<1rrgVVCLm) z?Z8FkEHEc{i9qDC$hh4^0%q($UYcI`bav(xC8I6YLmab%OekV&TGJ#Q>z#0F`nU%U zOc9~sv2~SS!{w=Z5P2kI6WHBecf9lq$eu?4{FFS(_l%pu$F7+( z!r4Za@#q#Nr-Y|O_P`4#C69A&+UdS5VzbP<0J!WiAF&*gU|w8*2=~Hg+fy4 zmWGZR`a-@3m2M_6u#|K&klp)#`!uUR1>DLV6dBlbAyb1(9w|eXuRxwSjzabxbtuu@ zY0TEBuHqco8>rc{Tt0KMHep$Yv@#>h$pR`~rU4~CbiEH#O5`@Nl2paz0vx1hHUG#7 zdfAi);9N`yhh4(B`i!vix=>!>_B2wcRHJF^*(vd|%K$|;X!^5!QjV99TtP-%>2{Ag z3QAp*nfW*LjcJRdwo9oX?#$ogGb->TD&07LtPNqg;cIk`*2`iRLULHgEUkl*C<6t3 zjgOV7zUVX1M>V2@M5^95v9h_{`J7gP2aoOlnT(Yqt%~RQ)Au=A1e&WjudDFM;?$tp z+z8M+FfhZ&GK}M9VJ`Xg79~XrWg>A8cP6w^2)pRcY4ng6gpr3~gWaU75Ja0H6oOjM zxZ+xk=SOhq$2v7V z*6cYQ%Cu55r_BR^VtpcOGF~{PQi^32iw7TQywD>~M%bU&<$WBC$4$vni-iQ7`K8h5fxjSVC&KnH*HHxB9wzskDI)p6bN@7GjqOVR>31X8;Z%#L?)RBGD-Jka0(N1K$IN zr8<`aBr5{pi+(~gl*osAd_1oqmf}l9H^HP--|B7SqK|z*D-ekh@-e_u-1Liun!KS& zI+JSTwHc)x>=_}iqFomg9Q6$|nW3gre;yV&CTC|nrNIv%{h=6?iWiqoxUZvDil>kg z`OHMRM4LDNt4fzq)#`5$aVc7<`rDeKDavpAi$^^HO;A@iHa5V=YnX%4D>P}f*}T_t z0#a?Yo`U|=^n1jo;fQU`dE8-gvoThQWfuC$yO?=@Ig2`p2Vkb-oOA1Ch# zY3D@G@^m9NaGXr&(RCHwcR}97D`Yel8C^oYWCq2pC=v+#*A4;*)Oa3;6L^v_!aEu0 zGyxYyB7@$Uqz4qrWC)JT)7s|x)vE>iBty4kt%ZP>`P)nx(8TC0xt4Dz&*SsO+~5}% z^o~;~jsbGW--Z&A)098m;DQAsL9)cft*)}ar8-{Bl~SeSPDmclKF*>&Cgmd0sm+-( zqm~vv#UG(vf<|1hD6a*^mJ4HwbuHi$pPN216lT+!1G%^oxW}+@1bDvcq{ybJDv=LOLew zj@9w-rY|H1?}Bh?g$QPkvsFA&2e81b0ixwH6U zSCrHKKL{QBYn=1O*p3UiQiWV`3r3f-5Hyl2_Dd?pCXEXg0!K=aaHX8c`*Gzd?jJlv&DOZU83W 
zFP8E*9(er1-)P`3%80w4(a?QK=sPU+uB2l>h($ literal 0 HcmV?d00001 diff --git a/SSG/markdown/__pycache__/htmlparser.cpython-310.pyc b/SSG/markdown/__pycache__/htmlparser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b279ac770250bd8e5d2375b9fc2d47bf8b7950b0 GIT binary patch literal 9048 zcmai4&2t+^cAqZ{1_Ka;D2n>BbK1+MasaCK;{Rher<}J^b9Gg!lu9a9>&hXn%5HwI2LK6Bia||( z&Fk*h@4bFsqx!^zDdG3)TVGb*`LQH@OP#|%4V@41g#U`T2gdRS&F3F5hAG zy}&f@Sp6Y?w^8$Uy&!TU9}xAb^=W-8us-I^4UbqZ!Rn} z#M<+_{k4mmezeuxDEjpU^J71H(A+ox?Z2wjcNdcF=LZ-q45lvt*UvvWn9|s9_{BQk zTxjt6wpWQ3hBHiaHFA06F>7NVXa-e|=4#}7K^Sq@nj`#x-rn9`k?nbWvlw{Mf?YHp zxjs(o2Y{~Dx#e0_Z_nEITprRwKW}p1T61@{B5&V&I(W)prRYtaP`@IVe}# zw{{QK#2R0^`!zSP9{RhW;byW1QR>j~_XLQ2)x5XsM||H}-Eu2i!C=ENV1Vqq@7}O2 zH(=H$q3c$xN6pZC<-%3B;?=w`#Cr}D&Aau+KKD1bBJ29KYd7YvU%P(ITHEpl>>p6m z`aB`8HTSe=y;Hm)wyL=i78zaQ$5&~gcSZT$5o%%bwc_>pcZ%PG&A*5U`2%NU8CTiJ>H%8s?=aS!Xv-M+bA4i|=V z`cp@$S*zZl%Z4-WZTXQGHo!A$!-F(>mK#Of-)Kgj*a$0%HRoGAqks6JStt!Brjy&k{*dgbuS2XCVo!E4Z&wVt}Qrf2y9^DAyt=K_oV%**RvU4DG! 
z@&NX5JqtFi-I@1-O06D(He!|GT+3=-T>t9g<0}^r&l$AE-2Z3l-0cfEzc~C-IUFJH zESLboB{;v9G5w9~1AM?LP~fWlN+Qf!jd~dR8@SumdaYL9BVO-ASwMH~x`RdR+hURS z0?=Es?JLQ6QroxDdmV`f$EF%$#Uagpuz5%v%vSEJxpnLDm2zM|zuu{km>>SB{Z9C= zXm}Frj#N^ZTvC4}Rbh#k`n6HgL|bDiv{Rz3vozYemu3dbd@a3@O9nGp_G_t>VL3K| z+GKe)i8{*)>=^1Cn_|aNPq1lr0(G9vu#>1K*(r7!b%C8>XHg$xZ?JQyr`UP+ChFtN zVi!Je@3(~vCC+mWOJoc%w|^rbDF))uA)BkD@l_|XJNhOi~ zPoZqXa>K7jygcv?!g6Bs%MHfsEjAy9o8{HjkIMt|UOv#xMHr?GHhMq@ryUL1kaOyk zYRavd5w;fxNLFNk4Sv)nLef>OD!8gkz+(-uc1< zQNVsMaKQGWWj%!M0_SQqs~N!OsKAy&beJ2t#rrFEI!^fkL@U;X6Hx;lBbb0#cPcR8 zUTh5b&kbM{r#S8Ljm)#?@jMj;DvnWc62&T>Fo!}{bUFFU%dO+1#}|tnIeQwj#Mz&s z5lJ|mOs9*KHbmrw%q`R!KZ7zAqU`EXx@*w8o=i?>pwn2=@6XC$*83QNV~R4jJ<-iG zxjTtA+|*>2pk;{hUEZ8WP@n87}^Vrj)L^vNJBLaf%jAWnmNL`_aY zUotDycHpw?#@Q{glq22}XJexocwxnbpq@aN&(N6eHX2^Q5*f`5uQD+LmpstN6+ z0v{vy8H|eJu7#Cj3>>K#(~u)CzSfi5Ci06L>}IC0&bHHd}=`I0!*vK%PJEbUK{g zK!9}u4C?$10#rb<)UF#OydjkC0fQ?FJb-aA-G z$ndm!M%L&OYnt3TJu1V8;75o`NRcE6qAsG55N=1|X_7F}Q$gZ9k%`ELhlov&bW-|- za$4%DJ&C1uDBRIozZ4pljKF}NYn*(U>X1$_?7oO~aPQsdfR=)OiT$AIcgX68g5=GH z1sf1;!T+~5;Mwn3!ex8abec&&LF~1cbU*L&(7b|S5*wgM?t+bTbecj&cM zQkO(v$f(>+x1j<+Vx{%lBP1p<6t=K$P9Z_$EBCE2(c_b<?T=sVS7Ve^UjiO2=ekG zsK)8mBHYG(3K*bcJw&Ki*-E%z6T}<}TT3KeB@oi-JmPQD%Q3c0ZhDcEtmF{JzW|J2 zOfFpjTc%`FZJirs%-Dby5;X+cLD@`GEMQg)@IERw#lWLclf!ekSd1xsVKQE%|G z=*AO5hbG#MBgsS{#DqhaRtOB4dm&HUqSgf}E~2p2#GN8`^P5zRUDAm@ixQFi1`r{M zttk_Gsm#lYtp7Q$zNO?9UDe?M5VN)K3X|hnb!4RxR+C;Kb_-S$79b_i#Wf^9z$A~r z5eAE&AR;GMcX-{j!+l&VLCO1eD!D4+mK@NJ2=Ptd6l=me8a*&S2Y}z>5t=A3{K$e_ zP{wU=|1h+|5iK-yZ2q}_l#t|3=J^riVE`b}8nn)OTjA>wQk!_6#zUTE2GVG@5vo8M zWilL)#<$xDM{h_Sxi5(%gIWF_l}tM=1u{#K(dp($?!pJ61v%pn2x1_`NgL(Fb{_ud zfxHrgkXI%!qZx^YqrRPPy=gs2+*1+xkWC7$xhIQ6fu$#Q(MssP_VO?I2{X;f1%cZt zI3T3(fDJ-{E#dtuMV3T6@lXxOT}b9>On3)s>JA`dW1u2q4Jn`_{#9Whc!P-TUoa^o zw>GB~AjNONZ`I+yPNO{oR)g8+N6pUo0wWwJ!A;>2!h0SK(3Uuh2m-CXurm4AecNLz zD~Mt40S;%lkBZ-*f`T^w9*rKpk?+&!n1H^Ev2XE&bZgGbEfKaa55oFlAJ;>6xpE?5KnETmAW+hymRY5{}^z%Gy+?)@WTs43A{1<;(Q;2e%SnP zv=O0tN^ZUJO0xC=UyCNEs1^}HzSE 
z5y&iFn4f>%nV(-;6EsP%`0y2242ggg7t-4QF~s3DBuK!17=zT}1vq6twEHmn^D4Ff z8g4xnMfBOk0Q1B9>WjCZcW(dS#+m-24gXKWZBM?E=?#Bm(~(O={yd^Ph$WO4LKi4D zH8J{3>ne#O$5KomifVeLr3(jVy`?^WywpC*c4YHQ6@u49^5w4 zBV2f}qqi;2HG`c1pLraj(|-dhq}wwkpOITqck%PMi&nOiv0))ysT> z@=%GYO$-8^nsQN0Ihi<$EoJfcOIidbP3TRbOGQq^5UF{m;R~}hxR$a%9NQVD`$F5E1n=LLHFOH3KZtb{Qf zSK>cMqxIG(NngF#%8{TK^Aq)}qd^J{21{<)Nz`gpn@B)~^(IFuQlt+l;D`CTXZowNS*m(t2}bh2jx+e1xu)q8wU{f5$2eh?A3( zXkv)F0xuQhYA2Dq2q#JO$rm8VO{t?r>bBNRwPB}8SP*ii+Nsd~6iLn(^54mAcoc|P zbkw*+J-KW25FlZ0rju^N)5Yq)#R{faLC0*PZG_9gTH7FahGch7EvubO$LwU=X1K8{ZEM|JJ3~0PvsK|O1Fz&f0{8G1&W#q$rP!*IYv))7$=*MLhWVaURyq@% zJR{GZV!+?G5e05b-AQKlC|I9}3e=}mFh-Bj=qag_57O=Y;0>VQv2~}2zR5n~ViG@c z-Qx^c@^A5ex}9!o6j=&R(WO3t{r(egX4?2Xjd7`a@-U2@>wEmOB=WP!%ROu0Yb0KAOTC1Hle5b?6n)Ls0aWJm%3` z#Jsxszo^k0_kKpVx8I=e2Xqd6KQJ8!v3L`VbK;!i{0(wC{WodH!3Pn?5!rr`_2!g| zjSZyNk=ljBLk1=>6C!*hn)2IJ2o4CtCphqs##X5KjEc{x_$w+_sUU;FNjD3DN?g=U zbmNKsmkWFuuK|q%nu-ju$i-5lZJZm zm(y|v{wW2G(|mh|Q!12SLQ%@&KUUOvRP#5A&B(9CnPDH_V)l`rNJfQVhPX=j@C-t*C$M3h#z)D9U-L?7cqsJkmGWJEPqJkC0llty zkr$_bw*19MpRF#J&LBC1kNbn$3XwgyQ54~nOtK4~i|^^;Y=?*@$VWtrjl~3mC9-9q eg+h}+C&*JSl@qd+Q*(-;>57)u|71?hsQ(9!P_Xv^ literal 0 HcmV?d00001 diff --git a/SSG/markdown/__pycache__/inlinepatterns.cpython-310.pyc b/SSG/markdown/__pycache__/inlinepatterns.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f4d9942f9c3bcbe0ace388f2d923d85bf9ea62a GIT binary patch literal 25055 zcmd6PdvqMvdEd_L6N?21f*?gvlr$tI5la$F64aA|NP-|pLLwoWfGm3faJkqS081`* zp_v6itQU%6Dz@@)YPX5)zI;HRCN6tg_np&c+NO=uhkKHq)28*LkHl`{vag&}eI43~ z`ulx%X7&Mql9T)qkaPFG=icx8?)SR)PNl0W8N%P^UVgLi@=7T59VVLp5=flE?S88;K}hO%89A+safZ6>qZ%+73&naXZAyRyAz zcXo%lE!$`IWOtg|v%AdR>?7um>~6C!`>44yyT|G_cRdoyK4w0W-D^FTecalceZt(G zebRh1YnXeo`^?9(`^~-C1Lot|r_3kJC-J7IFN93P-1lzC+-F71ee)r6|5vp;`kiq0 znT`<34wz4&?5R6?_8?MEo6jKijCBy@{YW1)`;qRKbQ%pjeS`bUsHY92%Sn3NA7ecT*GdeAz8_8Fv4n9m~ptTll2QKX+UpGW$6secUV z7t9xteo^WlNBSl6B+@6P{vgsXn?pzsN&OQ@zha(3`jn)fMf$XP2I(`_v*`ahq=(J3 
zNS~GR=aC*U&mn!zdLHF3AU$fHNBX>^Uqt$Xc@gQ0){7{A3F$HORis~)^hu;&GcO^1 z$vTPhmyy0~jw3xT{SP7ix_JfZE0TW2j825oAIr~lghI(HtJQ@{*|?myZ<>{raxyt= zcv;28rINL1m8!|+yCRx;*_WzT2IR`qB) zlf0ZSVz$LHYR^?{BX7)EE5@pow;gu$$z{7}Oy+McRIOF(R!c8-Lfj8sUN`NtG?jPTgAsfamW zwdO}}FZHL51L(fGOd#d`7h$mei(1<&cwNp{3k&^=Y+B9Vlz}cX%O5<)=JhFW=Lk0W zJAg?Tql$}7VvQPD0OV63E#n5^xm3dPG^&6c7+UKoN{?cDY!p4xnBWu4e`E z#++SQ^f7fLIlEjnRx8U!0dLKlCZ8gCCrCJGDwr}W<%3m_=y1uY80AXU0%(E2JVrnm zd&p>lkwbI|J1Z)(BQ+!o`6YmrjV)tkp#n6{mMVptK=^q~u)0um&{f{4l#j5=s`L-y z0VY?#iIWGf^=A&H4+^45ILtH-GH>5O^f6Y(M8HcVJAgqG-aPv9FjOG5lejTyPzT4f zUtX|ZUa(*K-H%D1=bA39%9wx%6I2h_t}GOR-pyvte)CI9r6K@;8G#wBCLx$VaW4Nl0=llILkSMjJ8?{ zT6#cUs4kX{u-}!+a>+CZqva*lNgm|yIGN-`MJjN7=mB$Xz{laVnAKP^GFvQ|Lx$x5 zd6*ZvoXr<*R*MA;Zy|pR$gCLi6)bc}AoRxB0+9fyRdTE&hEoB(3$b8Wq%*7{FdWcM znX*O+@dmX)m}XfzM*hP9C7M60oS=#EY^A;^nY6OtD- zV?ka=H7R*e{)4>EYD)4t%w&+)Rqd9%PBRtcZS(r;GP{Gk9YfN+m1akk+F&!Z(-CsvE3r+=6%t^)QV^M=?$%Vm>6*5Lg?#jonxMMb};_b#?Uo z=+)8j5vjN15@6k4B@WF`-qSZm@V6Itja?qTFq-o#Jua2Dd|$7s-SXDQCy1{to9vh3 zhj;tPu!qL>(RB{LG5NYbHmODIZd&W|@Yp2+YwyJJtOGW(T(u_i^X*l;s0ZiHUL744 znC!R+YG|($Cr4#h9i!uuW0Rj`^w9}10VkmywO8*Ne|<7HF?sd%@e8cEeVDi}IyW2W z-tk5m+}kNYiUpbJu&XbcZtUht-kx{d?xG?QPlLV}vR?sJJs2A~IsFD~6lZ#=SgG36 zzH#YHE2D9G$+RmqbHH)trza*ZO#4!AT6O0y;LOsh+efv<%(WJb;U&&71PQ%Y>+}9a zw3HV4AQEnbePO?>SL45kW}il*TD<@C(7x$?>DtTvr%y_JI^B=x^z^3&*V2d6{ik2K zemGtCOAznR96pWgp?%lW(}Vq|@sqAar>FZf=~{RH^r3YB)V1kDGl$dD$0f~fqzTeb zOE)X&{+#5V-j{3imO}&dHocOr?GLKqJzLay!LKvzH$2|x3!SLWyu9h-pE|MTb-ssV zM8@EnH!5gyr&UJn(SCNeY0-veQcGHHXx3EpjCvd2jj>E!17P?vFHK0w*ubi$YLV%E z2>bf`uhplWG#b_co@uB46aaj9h)9__KQ~jKy0|biHCdfWGnl%vG&42f%$$bRJ#}s9 z)XbsusoKsVylLo^v2W(^nW<}M5Cb|+0mDQ6(??HFNmWGr|bI$ z280Us4Ix$^7&w*Aqz?@Yq)(-51O3mCDh^B?8+d8vaDDo~ZN~BgbHfAYk@h;3!t~4O zTEsA>%Pl$p^#SSuBY2z_5!6HLa0=elY{>9+$p5>V1F4EU9eH~s&x=tH_!0fl&;;5* z?nP?{u3C!~D3@<7!`N_?*^;*_%hatriHHs+BjHe@RBA26vmCL!wTyC*qE|(Zj z<#KO9ar8>!xtv)krj8mN)!Q&i4P}l0AT&Sw81JZSE7h*TUcp_&*cXgCR z>kMNG0}s9!nAjuGDPXL^>`-0^QJf@2jl&-Y)N$Vyf6ZE5fzbm`$h^ 
z8pE3ye$dfi5{uO@_P+=GM&GqAnwO{3PdhX%pBhic?Ms-NtJ{_vha-_b2sd`_^~vE& zm)!Up<6|SQpBr^My%H13mVFZS?}gonW0mHlvE(#Sg^)EUs#q>obGct4_~^xtbtJW< z7W*I>uJw4x$@rBvtYH$k06vCDJ#>RBQmY*_u8L2@$Q%BMHUUD$NNe^RXm?Mu-^5S* z(6(=IxBMzT#dhJnvY#VA7Si}Eg+#lroBzjZB5cS$OgAs1#LJ`*o_2~KHE!dNS zs3y!qBk`P526S7R&)`3{T{P*gfPtvIM@fI2hp4W_LZ1YAu7o}tnuB?1hTl!B>D9=M=z0ts zLAy(i9C>icpVe|%SwXO;pd)&OAW`udH8F;UJ)be$m||NkP~;d%dOJ8)+8w^ITLXW3 zFLWzp?|lzK4`a|UBACI|@EzUWS;vS>eLVsrIC?j97f?63yu#xm#@tXk=0@is=a$?s z)_|KV!riIdr*0Sm#Esm5pTbQ*cwoEWcJ*G^p253ay=c18Tlvzmbx(7nXy7a;JP1GN zKvyu4konsjEd^atOMoFHp(J*PBM)i^e4O|Y$_&9VmoGV|=pR*W@5P1LxZPEFaF%>C z$#J`+XUoi$E2ibR39O4z+pgHkhRAI^V4mW*463Y@~&A)v$E9SvsYSG=c*%GN6FTb~yPtiwctx zi}uG^(#m`UwNZ!)hioO*u}h1ElUi+e8+aR{kf`$i826=&J%vPC&wml3=LZ%t0ze=k zP%^M!;L;Y)8$erx-Fcf19pXu;4~joE34^gYA4Q=774gq)wTlsf!NEr zJ`O+V^&?O&;q`^fa<=hO?7Kjtp(hDi3SSj0VZ$t9QeAAKIY0tZ zS^(&TuA+FQ4+=_DJB3pZiyrI*)bRHL4Zgww1Z>g|aH)Es$NmTEBcLfyT~I7l355s& z)4n3e;&yUHfRaOzowdi>*FuxpV6URiz1U{#R54BAmyIRY;F5JT{2e^9?#LyR@l=n^ z+yb7KKaS%Jp7(bnXyJ|&=b>!Gj6gs}-R%wMjwh^Y$9%b-R}On-6#aEQT&?;>Vt5(3mG>lgNsrJ|MwV9^Wv&OcN-_ z??pES#IoDfWDHuJWGpZyL>(^30*{Cp^6j&FbrA>scq9#~&{k?{=1z@6r*i|l5hEewQ_vD&T4P^9jud)IOBfo(M>&>}_$GMqwe3Jr-zKAQ(={|D(DWfQ53% z2g_Qx9)_Z--O!=-Jqh&=`yDs9!x6rtAr`+IMok=b62Nuvmw+?benW0LqQLne_Ng`g zQ(#RhI;8pMb}66px!lOb;j0sF41Rw|4W(^`Cni7PhGD?E;nB%Y*q=dt`?Cnr5oL|d zFyn3Z65%X2L(34+LUEY{$@Ocy^xD2necvtAy)|M8<(JT9gN}8Zd0V3AeFLSN^8qso zegGE)7HHh<@kk~=@3Vs`VTPV~4mQpqS81(Whv(zEzZEKM0$yCZh#hd*1Xc{=L|VE4 z2hImfLy3(RLLgC)7)J2CzY763yXrvaU7UOqRYs>)FYFU5f-0l%OE-)`cv`PgZvGgH z+87Jf(N_sEz9I!3z2^J1gQ+$Y7814p$6@i|upBO1HLM$0sMv?o*2DC`)%6^V55?kG z8Jsn0QKNU`p6Ih>P$tT39Q)&v7DJw-_SBX$YZjLFmK5?Q(dQ)L z^e6a2rnaVxcTfuQ?t{YG*lY5b#e);gjNdGFe&mGik0(N8l{Am=JgwDIH!PJLA$3D)QUt!H=iefwvbx$DC zm|Q?l+IoZxg`y1lk|LgHJMjb(XiGJqZI||pUOTWABtIIDBRMXi+lCa>Qc)q&ttt5(v;nW{IagVRmj-+4KA)R= zsJjCGP@W{>xv%{EAiP9P$>W~QkFD*w<&qpQ_O`J6^FoGIEqIk$EGudPlu1GxbluR4 zt2O|q^)($-0TsDUwIqUAqz<`sS6_<)&q_8$bFpe1sl?q74#b3S>h?h?i-2yTxsJLR 
zgyRpiBSHfAx!msaed(kSKJbadchv?fa+BaIzIJ1xqM+Ihm#nfI6V1=z&RDUOjy35z z+$%dSJ7$A0WJ<>w3~ojOpCJj*soH~OL=C9N(}LQO@ew)L*U;5F#6RIEreD-%-iKm; zckseC)@lD)Hs@0D|G0DmHbs!MpeQVefmH((MFmCCdITuqGFB*xZGj@RsK~-|0M>2V8Jfho6MMD@HT_L z%;2vu_^S+f;%PIqeQ`6iQAODbiXRbuKZo~y9(~(?oiAo;Ys7g1=maX&j^o5Xk5JnA z72ie6V^HEJR6ahV<)C4kbxLdo7=0EAZb$(7vhPk-3=Hcx!dPy^Di@uPOKm++1??X} zLqPO4ews|q4i0338wli;7-SRpqix{N#xVib&!*&5WKJE6+zbO>B?d_3Jz6(7XYI*N z4*E0$y@kn;nrSBR*Ex@%*72TfWO9GDTqz0$m)LYS`#*dc7^p_6=0jZ8uBHZS|h6zAS z-lTsJ`JLL)%>eChp=`4CV=uASTMR@5{%ywOO(c}B@lE7^jm?>EZqDp|*jvWIYcH~! z)@>TK1ta9w5VjJN*Kwh&<8}7WvMEy=))r;)89dH65H$M{tcZneK`U-WZzS+^@aO<( z;7-^pNw$?lf|Aab5?o{pN>VK)F*6>NbhVWv?u6%~xVWZ{OyKgG9GU3B;fii}HMW~L zk&5456Q@w|yTioEQ~dUsJE30hvR}kxY8U*oBu&11p86EEg8HZ@pVgUOZ!dwTx*hIf z=e;9uPR5Oyq9#1(Pz{b)iw|&$@`y^|RE(4k;?xW}<(UMpK(`hP5e&rt**!lbq>hdjEDuCh-s1mTnTRP5-v1Z zIcIq`x8xnYYZlZ>=7)>sZN=|b(58*sxG8n6PQC0GNv~YWNj(W!9noq}Y&Z^>X>QOm z=?*juCS64WTe+h6LbZuNT2lFi+*^xDEQ%Oa85|Bm4CkA{e-m{o{0bPc^=LhQSJOi9 z7HU+Eq3}eY@VqE(*pJ^PR*(B_0KjMfK!q&oZ<&U0Rmw58(As3GaF&0FLAx>R;L$r9 zh}C-?O^kdTRqTJnU^8L*5D2T$e3KCFL-THIxA7k}BlSEC+N$x^(B|lW573SU&?X)f zOar)Y#E3xmyhol5=;Cz#tvvWJHbe*0Ct)M1MR7`ei9^<+M23zcGUOfgI(2;`Mq*oH z#3sM8e~CeZ^O5PfU5XafPzGJXmVCV#wjLcnxfLCIZ3|Jf^Z&@PsRqjGO`um3s})bI z3bo-5$rdDcGh~;bIFP8J*xl7_)6~}zRVouW(tj>Aa~5RZv6ieuE58ZB_tu_z@Mn#hxOewdh*RjI1T_&V*B_<8;~!p|w~!;>XnZofO}1hJ!bM z+I#bwdb0`tg?cw*eA2P}PXQdk*4h=aA33Y$9{|HWnxOPLciFBR{n($iucuE2`P?BC z+Q9J{i$^nX5Usfxm>M?M*-+fC8P7gx?*cF99y~e$>*K9B<-5i{A^4&1puiq~rMYih zvitC!FQF3m@xcXP0*aT5uTkU57$Z0hg99ME*(3X&ys@Opc!dS~#SO<;6EMJqIVIes zX`hI9!_)^Mq!PEiWs7tL{mFH)CQzv@YMFe~%*5P3KRziIf4bWi?#0spZd7*8-43ga zU2Yq&b`$3!g&IJARO{T6K?-$ZIf5C$QG;7LSSHYykh=@H(07B}kSuqv4n%v8N6K;!5j*f1`|qJk z`|lvYX2L&V{2w#;MP@Xc4-F*zOP05?LHnO0?;u<`;%!NUyD{Nz0HU@f)AMlKV)s&d z2TZSw1Q;IN<7;qbLUz*%2I~V`dj@PR1{N4sT0EpN6qvi%;DJ0&*vIP$m4+RaP!_Q$ zIx))j8*G&Tvy7~TwNP1mtCNtbN5xSZu5G(QT=>ht$#|aA4*Q>R&fjEkiNQZ*AVf=I zl-W0^6kb80%^4PvK$Qc&QC$G|uTYTH7mv3?@*47w6Oy|TiG(90Fc21Y>G6-h`uCI3 z+T$C^sM&v&-nps%eu0De7y~+fa2Wb>M=xKw 
zI6N^nfqVrVQjMY?p(HLI1su+*X{HUMfx7eG;`-W)2>mI0!h1CJFY>)aA`wq?C%O`6 zQc2OW3#ioc7Z?SK)lgt!_4OQ=$1(vQZnH#=Ve3ys9yz?rxk)d+CEy z*AC7cI#}O7b#4F5q5aHGpPo*qPs0K3Y~Xb9TDN?bf+tykUkw=gQpLiE9&&{ z>>bMCo1mv}IzsN}l|8|-Dm+J=E%;x!p1^_o1Qa@`TC(LG7q7?jl1F<+hpLvKJwrvU z9udwLQ;ZJBMaIWnEsr~XxIBncnZ;_!N+;Yn&Y4bPJ4KEwNQ(F5noL2F+F=M>VTv0W zpomwpJvXk{lxcq(Df>GNHWRz#7Zaopa>3nN1R}K$54fNn(^3${yF_UMp{}$3Cyw7tQEe91kSWdIWuR)H_fkwuTLswNCt| z@ZdtnS~t?$5bwdW9pmh+;t~a|&^!3Z4J6>NtnI*9cR*e5tM{2n$o-Xiw^N4gN;NxP zzFAM0iJKAo7Jj2IhSn10S=8%9y`QajnX#K;`wkTO-kZAfl0_rtP0J6KMY$ zdi!bihPLtYIj^^$@q3fBZvPCR6@4VMmMot@?*aGpTVrr4n6Eu^OhLf_W1Rv zc}UwH)b;w4wpcr;hmrX2fF6;+{JU<3(aQkl)BDzpjF4#sJC607v`aSH)sw(Ipsi*D z8iW?JtfA%r z>I#oGUhhv|=TW=sQ(VF`OyR=3`s#*l;W(y)D;H8LW7q{Okn!`lAM!5{5k=lU%h;zG za5YUn(E1-}Y3yGPGwTe32bz&ix$&#&4y@9R#Fz1BSc3|Z8@2Q0c}rOY3(V+7ZQKmK zG@2W^G&($PFSF`-2DcGFeGv`nZDxqJ^liqz!{ECNW)Q$iP`B-K1zbI=X1B?mYG!#E zopS6Gw{2j{WK*;aa$XBJDmTlrUJaGN*Gh0LN5#8~^JRF_bIM?FvK{%7U$jm2+Q{gZ zc0tBH0*cTWj!UTF}48P$Xy+_*(HIiWu7%FTMwCan| z(?*LWsqcl_p3-ZNZN^$52AJI<$l{y{9$~9Q9@*-A2%A(eNvdF}9?Nf39oeX*l!6F3;YB4YE7kdi(XrA*VVy&KWzVa z^waEOkVW!9n|Yv3sr_y)A^1uEJAM?we}geny}ujQAnV^^kqAdwZ@9US+^*7Y$ROq50YWjRpUM!8Hb? 
zSes*L8dpP&qxLk;!F3}-HI5cAD6ub970rsUX!%3k&>1}TI|yK39>h=WNJIT9F7n$X z>a#kV-U(|x^+}5{(4yW$;txoRqTSf4Kb;U;GSHt;j&Ee(=ma?NrW#dTX;k*FV^D4w z)55J9yC5vg4TD+0{VPxx@W|fW=DIty_PEm8p~XXg zS9?Iv+m-$f?MU?Zl&8OUdHVYS^jdo!-`3#woZzC$V|9ab^NNJ`nU&@}S}t;*zA^^3 z^qj-BZPU)_=_y{DpTYI*>6sSpM*A`#Fm4EcR00EGgrUMyQKt$oYC6QaDC$(q$pw*) z-iY1O>=Sa_SSjRj@-kIYk3AZKt0Mt5F|-cX2GkDp8mR74lSh)n7?%*5{a+D-_8m|b zzR&^h=9uaBj*q@6A8rtF`^FVodm?k`P;!$2upA*dddhC{pbBhP5yX_Y%9SC^N<{K^ znRkmux0Ndj?N-@M({ZF z$pB=&2CqSb#~!+~4cl7SLll5QcyMm8ps-wV zDVfM|qfpLpxs_1eC@}>7f{q60+W`K5(eMcZoVImOj0!?GQ~>3KtUb9Us1>QSK$+_{ zfbuaUxQR%+k?`d;1OHgHy+hJGI7Isr!a0WrIw{EWH_)zO?zL^7;S@VjP|?61%}~ArgCW$A)DEJS~6VoWdJ@k$4#C+5!3I3(s{e zQg;WPsc%M|G^XY&73}-Y;DiY=bIuU38FZ5wIWcB|+a^qSM zssDY}Y=A?^iu5)DNcdEb<(!i5$kd*G=$C;Sq`&zcr!DE2a2yUm&(w#4{s=17`i)l7 z#iz}nU^fx)KB2cZXg8Cu{k!PL{(u4Rc-qkTH-Li_GtSY0+k|RqdF8|5;8CCrw|{eQ z4O-*qF9j23>O&_yfJ(KIhW0Azg}=d~e1_bp^FGalt+yfXD}vVTJimu~6d!9ibv@m7 zSd6ep;2FUq3R6INpebRxq%LlHQ27+~vl^9Cyj~9XY{7zgfr&N}Pw3)T1RD==;n?iD1Pe0jo?g$_-YbA8)iWgu+eGmMGArwQ9mrPq-6alp{KqqG|oilbu(w)i+z zvv7Zien}`#28dzmLlJWnm1>hB!h<8W{O%PL#I13}3aS{0nyXaKcv#AKSd#7v7)bC( zC4_u}dCpf5G+|0M-qz)4i6<@vJNUE?PnhHKnWxwE9#-Yxn=kz7^JIzkPeu}?>{gj7& zBEAM4j{rH@a*&SLCjC+FIa~u(C{?h#;eO(`6YOnpipd2!3VwbqftTs(p*b9ygRl?R zUh%*U(;FWXR-dMjua@$g$2``JkJIvFqBPKP>B3g1m{m6BBSnH(};UBgzsa(hqEj6NJxzxUm}SJcEXA`L^)he#Eu06Z*bKPjtigFR}5L88q0=2$NrD!1*c$w7{5fA>mO0EAqAE8J54zfO=y# zY7wW|xB{$`jVXdxpyinwbxQIY>gGAZJ&5=xq3|`g1RO0;n{_RL?@jq!>_e#JAzX|s zX@;*MsO8{z*k)e(l=8(C5Lfh!>kp+8KI9A|!?C#-#ZTkzyuDb;v>Zp_M~DS@Zl@=7 zBxfg^$inFylP}xK7eBJyJol^CCDxlG-=E7y$6p`2e8D!@WIqF!!M7Ovdj|i3!M|hh zs|2G20)XYdq*LkwPEkYPZh!G4y(a|{L!Rrj_41Sow^9=Se z_yPmoAd`zy>LS#WOn!~Q&oMZ`;6(;6F?gE6K?VmHP*=36wbDuyNM$NMCvAn z)Tg~pfha-_DJT+3o#LlVdVzo57~t95L`2^U2V8jbe~C@<6X87_dZatn^K#Fzo*4dh f_VAA}^|vSVLp>cmDTMob9_cyJlR%9`U&sFk^2am7 literal 0 HcmV?d00001 diff --git a/SSG/markdown/__pycache__/postprocessors.cpython-310.pyc b/SSG/markdown/__pycache__/postprocessors.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..23a5a736edd88e7f632ff81d07b5ccae6cbb258c GIT binary patch literal 4968 zcmb_gO>^7E8O8!22|<)BJC0+gO|nkXSgb9|_9S-esE%xzvE9g$MwA5Til06%SIdMRf16?d`w?z_)E?*|l2 zP1zcrzkd2<#XGHO|Dw+5lR{?^O?4q$t)#ik)qPgeOFGkB!!>=QW|mC+W^T$)l~Qin z&HSL1()TsjawmS!+zDasW)6&!g%R7G#K@$u(4Rm*>rSCR758mn9!|Q`M-1bq+!>6| z(D(r>W!+R!%bj-qX=s|g)()#doj-Eqo*T64wta{9hP+zC7d252ozTOKV4MFwsMh&? z+1wKHk{T?@+xESn(j3gZ#j9c1P`Bpi8?o-WT2FU=#|x{?t-Ke^+xNZjL33+3wfCwL z)aH}z=lT}S_m|JZ*0qPjC5?8&%Lj62z9EBMQ3>YG2I`uNsQB zcsiy98;F*t+@MAVLDN!GN+9RAd2Of@I=*z;zEkJRUJV%TBsvJGk>j`GDY+~5y_y%w zHZNA4O10kK@RiA!(sn+%nd45~2txu`=hYpbKNV7ef;rn>_A0nUaf{zA-s7`M2)_JqsjyNkDxdIpH zD>~oWYH?$3ZGCmAP%N&l7wzBuKOY-pNj!!-z6v1xprv>tP~jZ-SOLy~R9ASb>Q$=T zk%B9)=J}4~VZa+w4CipKZiiJT{5u`70Ib^&dn8jY8BH zXetj8YKQDd+t<7Lp&lAt?SN6hIx?6RkC+E>&`SCUDyhy@ez)oQI0G)w728hJ4`WnD z4otq)M=#GymULR>YFP7Myo(J>(@w2XR?RIHdSNpZFV12xmy#y_MAo|4@!-Sl$dd6d zA)~YFPU`_yuL1hOCehn>>TYqMWb9qei1eBprT1En+)>dq-PvHFdalW7Y<2u_>DK0B zc#7I=ctI#P`yozkCZTJy;mV-n&MCFCSuEb)to7)yIj%0>Xh&za2$Aymix04gvLGz| z44YvllQZa4(8eAey+yQtpN9}yi6Lj`E-M+X?i$DorfU-H(bU)xop*4lHpeqItYRV# z=|Xo@NMig3A{h5VFsKf4#i7%w?17u$VkrI;f=Z;WfLJuHbI4R^AEPQLK&*E51ifhl z&;^P@pA)MhtsHj=o?hX#!1cC0OoR!Jf)|hjIf#A~*Q5xWvQA1y*7OW|<$SfK3ON)D zEbHG$er!%}U4?+{KlGctUl_0B(HL+RY0QpI9ND*@pk2TDzeJux>FP7a%7jw*s%h4Y`w~z2q5)%V;VMVQD7I+VNNg-Lc0&ZxKyhhZto& zYMK08_CV`viOp&RFdY*Tt zzu8>aod0b0t8e)2uP^7uh!OiIG?6{(?+~GOp#4P)Sy$WDca2^1fZfx+{s`ewJ52S) z(p~+6cEt7(EDtkX)jb!a6s4kx-e!4X@~Xnb^RLHo}Bv$l-@3*WZ(&1afi z3+>%W*F2}~r^4*vl$+|N{-(KUl0TW)CEZOQPIt{NJJRz5BUX!yJ<)!SDi%r9M@d6DC?;$`sE|V`%xO1 z5=V$m?TD}(AGR9!NaIN|oO!VEXt}($d}pcfV0HOkVLj5Nh%zXl8=fyB1LnuBn6o71 zQ28cV#=OWRi{uCBm1edaB}O$znT8X>(0XaQmlB0r9)^wgNl)y` znXac_W|{f&te(})Nt>Nx(=pg4v-QsTp96f%3zz$#K0Q!8g4vx~>% ze<&=M*9-RxPoh*z8TlTqim!6oQ|0(d%JH4#j0DVUG=Y%Vsw|8iOtR2#WJ-KNE0xjsc=c z^43=ky@as<9jYgB=GQ+r}%3?Qc{nB zq!iR;hX~=jL?fNT79iDEj@d^j3^To;l6ctkokwfs;^Vu;jfWeLHwq`hNJnj<#dQPd 
z&N}b@eCZjdQ~4IApNI(^DkNQl{HP}f{d%M7;Z#(C@RsNMd-;mH8(72^N;I7Rh)*PB5yE!2`g1795??O*tGp3*>MBn_4Ip~7MWcOZQ_Vh zvE56>lf6`&b&+^-R`h0n+p&tG^VZ!o1tsKin5T^(hSB(m3`cACD$0f$t;4;7eR3 zDKFV6aee|Inpo?9`o?u#?8-5NBijY@L-N!8Qw2S)Jk$4e>7>+>7bcTB2KN9>`CH`o1{);)plwr8qpoM9L~_o1xT*6 zyI=v(G&#!2xpn#^S%#OZf=GSo`3!E zSyEkhoc~Z~@d?nmkD}YCxYKaB%e~Cay@uyf+t2(wXar~nypmNK6~4k(zjqod4;^0R zYu`J3O?W4(ulz<8BOzbM$hxSazlQz>-$Z}Y_CsE&JGCqEKYhmuk4B~^3w98zQ!a-^ z7=Fd3Luua6L@o*wn-nu-hkYY^1$(FlZJ|EW^CeLjK9I>^KJy;y8PnJIc6Ry}dpn|rxt|=af;KX1!i}oVlnH8 zAsdNUX#)EDK&9+yeA+W&Bwj3`U;{BsHJ3RV1f;!zQt(_hV)-Q|FrLMuEH2oSG=~qn z6Abdy!tsZ8O6^woAWw}Nv3f5~dc|zRHzs4sDE{Pbjl~6Lk98a;>|mh9TY)=qA~K;h z-Yb|G?#cd0rQM!kcXoI0Zr|Cx{TX}O6Epf}-egA;_p+@Q5xX7TwOeJe0mw4M$F~UR z6I*}!Ki)4UNj`%#7 zj6ee_V|a3qW}K0~x#+}$%rGqsWqW_=gov7HI!%NkVvQSxRC%1GW6>J+QX_O95%&7G zFr*RoOrEvcnM_V!-@rhvqCEVS)nn03k%XhNs%%TB^7`Qn=Gh#jy!i+KU(@1d-s}uMU3laKb1x`XX2!r6VYk*xsqePt#!9quRmjZ`7LOPI8T5t>&*G?NF~Ud<3Kn;eb(Sg{U@2rt$x~4<$d+Uogi9mtY5hJl zV~IdES98Fz4}kUo%LSyLsE-XZ@u{FUeFUl9M6%c-WuS|q!J<>cM#@z0Z?RnRw3A|K zYFMmUAt)na$QB8b5@w(ZZE*y&e`>srb1~n@>g?jKz!+jTZKg+Ac11pbImj*n_!)80 z;*{yCgb0{D)TYGQL(z24au?3`#{agK&HbeZ?Ieq}rc*9LGToB{!Tq9afI`=-~(Hp1EJORdv3-6s== z^V%G3k$0MZXw#tPm4Oynr>xlbc5G7p0uCsBnEx@kgM1ULj;}5rIKs=rA1Yd{xOKN>6_J7i_4_dQa1s#unVQ4qOH_5R5cX6f~x8S?nY?G5_Fdy zGUYysdIuF0V-0`fh|T4JygXGIdp{?#(1;VGLm|MUDLd)p{A6Aj8U4Xln+woT#E-fBLu5-$xaS zmwm*5iQ8J?Bhe{CG+UV{x~5lpN|Zi=w5-^`srl+UrYO?aa%FP!ED&IQPo#W>qBl^5 z?tA!M!7p^j@6O$Nsk)YT^-D}$z_}!n`1lg#$2pOn<>+ULqz~WmgJY2+gYa>fXUyIV znst=SZG2ljqu|%L>TH7LmSX#zK{}#R@gmNmC3-27iDFCdXJ}l~Yg~}!y>h?v&-`;A zG`Y9~{qx`gI>0>yfqzoDaL+-ZrOiK7?_<6C4XWCzqAN@hvgM!=bcEn#CCQ`~jcSq! 
z7%CI>Ax#c0IZz*Apj@F)gictuUHXoQI6%?lix32aZWY(Z_*ZYe=YEC=!uSS7ZBa$N z+PeBRwdnvhrY1M=)E0Y`-_t9)tI9Qe(M>*FY#v$;Ey{w?bIZ%>=abC;k`z=EK;Ue= QzX9{B{RN83y%wsi~s-t literal 0 HcmV?d00001 diff --git a/SSG/markdown/__pycache__/serializers.cpython-310.pyc b/SSG/markdown/__pycache__/serializers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c37d84d8ed6ca96a740e9fdada8d3025458d1337 GIT binary patch literal 3161 zcmb_eO>i8=74Gi&-<4MXCJ+*@u+Aa z_wwQ43W49pOE+6rpCRN+96bEc`-U@Zx$p1Lmc zmMDrO+q#++N_JZ+$+OVX-j%%F7Mar3)l?vJLkgjtch};!SkpC?NAIh48o?4+sz+eF z`;Z_U)^;pKtnH1s5ZXt&nZs^uLu#v)r`p++u#;x*!#HVgX(!1#U8TJQOyOcz+i8;B z(RN#?SUa5CN! zb#MdtRVj0+-OXIKW2J3WYd?xIxKR`xQILpTbwuQM;k4tQUwNhZYuK@9c9LAl<{Npd zi+~o*Hxu<%ccaOsYswH zFX7ejj*N~{`@Z}hu#aI7fY4yTf$2?iK;Q!+tBM>@4oDobKH-*;O_k(;af{nV<`~&| zBXhWmvXa~+>%=C?QuZd>dtuwM6botnxOBFCH zz-+0dBUih>T8&=6c3oRVcSCzpbkevb36M2FV2=$8_e{ox9u3)sm)_o-=phg~cWL15`f= ztJ(=xu>tow%qwtz0E_+?tj9o`hbd=+whP+crT;f*kGR;<82|;}0+9R>SSK$bITMwM zZ=zxXTMJec5bvJ=&8aUffVyP?Ir(9xIE^&_g1A%CZPK@Z(!O0E3;L=U5tZZg;!cn8QV&F zP&p)huk_>}RJHW>De`dkBCPWJ0i5NVvvx}#?&!lfFyr5r0gMCKb*KzrRZ#k602X{w zGVYt6@SqF^2;5EK%*w$q&cRp=cS%cE{t={ARV%@&!8}+y1ZzJpt%H$6qDCQleGYSm zaYeQWT+Lu(%fAn1@DZ5tH$Vumh8ga+BKVKfovHjM>^F)y@Y)F$+hg+IW)HeOwoh?S zjC0t+v9(^XJUu@@Tg=a|^xWmkv&H3=uQ2l*(|gCX->aL%SiYMdOjqaS7+|^XL{B8ZadA2S7&C68SR)f3sat7F<~HN+AUf# z=_u_KYA+Q%w)}iJt}WP9q616xP!sng15S^eCJu(f(4d_(-w{$*-nwz^ooMyi^&7V!1I9@jRj;<-=9A2i#U7GQlK839hS{uvY_a6!sj)+=(a29|LJ2Tqi#p z7;IOd70&?y_aI$bxNnmW=@#j;lHpTVw1gB|Qhbn5WPtRLf_tzvO`xW7<4#9p+#I3p z?L5h#ps-~QRRMyjZOmZuHDDjPzOF_FNAtlx1AT#^W(cn6noG#HVfa{tKC$=fNzf_+ zw!x!5V25BCXCE`L%UUxxJ)-9PBg^a~ah>+IghI=pn(^@CLBaBfQ4&MlpP_w?Kuyv! 
z&zHXdJ)ZR<^zt1f*O2@Y36@9$g4tYpK>mWvyFfx}%nPUV6uf{1Fi41nhssgo&;$QQ zn+M*@q|jaz#c3Kv63YSYweof+Nrk3Y0P!xS==e0lqJ?5!FgGT-Sb7pv3 z%E>O~@XR@9{&V~P@4uYm&(4|>{(k(ekE$!zB%UR@gC;PcnHXOsr;Wzh=UHyh zTAo2k-kC+otUH7EJl+e=9Ny>n`z+q)odvuv@b`kFY)ZvNJ60sgynPt%`Yr30&GsDs zpkew;Em^d|{fKq0K_qv9=E}%vOydyBT_JD+rlwU7+z{cn1duCFj6pJEfKzUM`l* zTegSUdM&iB`OLDdntNa!x;6`l(Dyscv$pKL-OxRB?~PGG2kwCvIDV4`1WKzN#;|gY zL#sKY2^t^ShYh=B-SnDR!yQ3`R%&GW2fRsf*}UHLLUw3v?%LJeR@&i2V>CImU%Ohg z?3QD#2ew_cZgm28D&d-4bsKIFpq^of=BnR5WZuqhXuWdf%GFD+TzUB$*4D0@ZhyL( z)@`x8)`fc|>*dl_-c`d6L1di7tII^_HJ-kHY_;(5mC`GhUM{`9J^9_VEebIZAEWbH zcEh^ovH&btG|ih{6`Sa;SZ{A$w=M*(Yu#L5UEA1PD>cQQIW9J^5nz6gAaK&gBN=H? zjl3vP9wBJ@C&%Mirw(RT*PeYa+j>KF2?xi zB!*Bn+RP0@uXgx|fF?FNEjOszZI|(h>5K*)8+Y6t54?RC8;s{%7SFBonPT@uH}TT# zgwob%@A1+H3B|GEq8ckrC)W24Y_=1`h4B%8DY1F<`@id}D|bHtI|X;!o*%NiDGmg8 zg?!&_JIwDomx5sD?&jt@cT-WmJ5H|DK8#Op(<)WQE1p3o0nOV`7G?U^WVVRcv2CM; zpjBAK6Fh|^lp-mV>&gQqnBjT#0RYdDu^meHl(o9+R`)EgW_ck{46Ej~cB~skEjGQt z147uXswL`v1eAgTe1I@&F*}%Vw^{I)LXE=dd*a+0@AL zw+R-lZ5RCITG%GfZg^de<`93PwFYPzCuj{&AEd1pK=}o$=|V*i1KZ$Sd(WkswhyRu znHBoL13_!p^Rm_S9k1r0r59RuV6|N0K^tgBI0ZKBK=zQgMuy9Yh#@DVo0w^{6NHv~ zKXgGNWu($ro8-GHEphrsdEyYx9i)u~AJ|s9p0o~|o$YGF4gw4qYHp0C73}&Qi0@!s zF!>)~S$>c#%d&N_>s5F8=!sOiK+N~U(dm_dtzM9>WfgN5%Yaqz0S#=l?SPnAj%B`T zgdgsGNI9|VBz5b+Yc#OaAlhyK=bdGsCoMiY8c^e#a>&`FD)bWvqLDqZHvE=b8ho(9 znY^gPTHrQnu|~a4GQWzoxQb;BXrZ-icVTSxN~qlcGaHpk%Wk@rN^Djt7)Pf;`Fy3a z-?1A>O}0{T{A#7bo(83nZU|`@2^u4bS;oU<%#buWZ}PG#%EtawyuF1ds3Vy!V@?LL z2Bdb#w$c;ZD(jqeW}N)zGOVqqxD6$jbI6hPB$rLpE;=WWn{iG$OZYDDJnfuz&Y)!0 zIR&YGh7nkG-{mWKZGw@6{4kre8JD3DLW;d@w;PAFrvmk@R5gihIL6B~wN;whyGTN* zE=N*D!7tQPMRVj3DhS9YpO$z|qa12Hr&>4Ylc@}`IgdwwY$!T@O}Tq!VogfvM`IgED?Hdh3oqC(Y(ZUq zAfJMr5h~b51vxdsZ;h;UrF+JD$8EU`2A2icCE*qeZFRMCS6+3B8ha6h;#nxJVApjj zE#Gkiv5AYvR}0N)qOlepw%u6qoLJk%cN23iFF;Cx^tXrI-Nsj3?h4RthTMJr_=G3E zsM1W+Kbko`oOQ1tgIUA60y`>43eA@-Mrt2=@JPn5dL+^89avrt7?`hw+WnsaFsY<|x@Jn)$Vow>u^m0)F9caalWNvI5Z zx(KlZjTP~;EXZ=iw+^6Fc~=2E1ZaT4H+O`7TN&Fe43%_WVs3Dvko_9hp+NSU-$Cuj 
z_9nWFhRq0CH?VW(@f+)+yEwDM{7ySop$-R|O6w43IPq9+PTABN>P9y8Ll6uMB!#>N z>ng8I+t!J1#Ul83TBqOC#-9ZV-bsz<-l3i zf_{1axcp8q6T1lF#Z1DyM1YYdh-XHYj9o&>TNyJ}DFWx(>8 z;2hXnxyl_3Y9A~_WTUF~(x`2c81O0Q9KeU3R27K^=Su;lKVU@Oc4UEqA&>qpqHxV3 zSBWg5)H$(2*k1$k7gJreE<|Jte9U=|J|T0(6CQ|w+hoHbnl!B8NCwg(Y-nCPDhR=- zB#_z|~2>8+)kZ(xsuDP+ zeN}z+)~_>QWCHK2j=DOUE1djK4v~`rtRtU1~2Aslj>TwT{frgCn6@`lvfX z*y*&?(_3&ddU}-UXO3VNN15x=CuT1j$$dT2qpYKzm5vly+Anb{S+ifA#)+dGg*{F! z*Ag)YnVpGIUm{O@z${>iq&$H0&~8(Vg`k@(%MzEmRCkT+cfzIQ;_^g zX%Wq8KxM$aWH}FuH#n0DA3Y0Y3o=OYWqj^9b0(31+e2j=#9D*=`QOLh$64lblQBkI z08U(3Gz9;?j>>X2nI|gwY}gxAI#b<+GkwPm5NwFmwjabs%RN}7!bv_p3F}R)Q0j|e ztbYt5+?Cb}WZ&qpatcabIVo$3p%~Xbh%K?t7f%|tWxEc=ks zzHuZWC-rktE}C&NJeo68H<^Q4lh-=D7T<7jS!-oMAG9D+MlZ9Ur}UxHpTYN1Jr6H~ zkD>-Kl%-ZN(!*JlLo^|hd3;IZ%+okIg?um5I!$HbtDYGaplIgm^A8mE7vWrgJ~A=O z!Hk@24|Y^gcu4l5VXsdU*~IKPrVzO1*h%fnlmjSuK+ef=NqmAJlHHM-PR9Wp7!ic7 z?T}g+?ZOoi0{}dX9R5cdg@kz}ZI^i0R@LW$SwP27nuT`ZX4_Ek2J%SqBZe}qOHo5w z!2=K_l!l(yU_es?O{92&pCE}Ow)RkA&p(8fdKT8|S=hAf%tc;#opF2+fFL{_==(jw#J{5%QYcO1dh98AQPE@_a&Ia+B5|go{jz)Y ziSj=RWTiJ6eiiWfW+|o3hsV}H{KiHs6>S3{3U{4Q+}YIAMKqJ1|yv+>l4t*mv+5 zpJ|*S9&btCCfp{uPkP z;^9{BKLJ~YRxbf{T?Kf~6F~R1BLeIU!1ye=Z4Q8)4!1U?{aln`YXEj`XB)h%e{Qk?&ndyKFj`=)}gnUyA%vGjn_|xXn^9FU2#b^D&JF*X7Y5vp zPk?dRsxp`25Y%E2(j8!B3g>WZ;o~9pn0k0mHtfDqolq}iI1wF0HWTD`ok52K|6^rz zlu2{g?A5(#jk$0p{yb@xL@T>bu5%{Um& z4UU@|Timeh>Yw)LziWu!PkS6ti%$y#BVU3?BFhWcHdfa*x8A=~S^IeP<_Fi;Hsegg z$C)Pwz48KZbk%RRy@m^GacAv*J2nV#J%lC7Gr~c+Mg9-Rt~kSeAwpU}pTJ)-H%FW> z4#yNKiq+8GndXU&dOc*Cwgq}WecUYOP64^?!qeArih=$a1&?B#2S0*hK?DS{$>uTg z>DV_ZSwT|H!Rm$kf7o!#S@(Xm(E+h7s^!@H0p$pk%SOYkh1X%ku!Gqhg>CX+rTUr~Q- zsz7nP>yMSs<~0rVK=a4IY#=*D8kOH@iuRjnKIG_-#)1{Z0`zd$CQY(KQAM0z!YzCz z`K5>(!V-14ucRQwZD_>c$fBgSfsQu9RaD?2ssv@?0Hu3oc&wR*Q2`r4SlTFN8LGBN_7bLFnYx`Y>b^G127Oud7j$xgeyx#CB-RW+F|f5AXq>R z;Fv%y&Qn^gDx`aIt#-r z{f0(82OSeaJ7~~tMt!EAPqqiOy&QagqXK$;=#PFO$^qXE=ms+~p-*#gV+?$$z-`ga zN^eN}WpaE-Z$bxv8nhaBLO6Adj~tfz9Qzrkg&ej%AI|p|NN@CJd0%t9uY5F1$J)I@ 
zG>i5>r4-|s3m2mdA`B-m2HDBNotcXYiB2nEJPW;fK4T@C7vn+XVJVsuZ?p>ge;Go< zJ&@TyC+!Tx3i-Tggs>v`dW$jdzZzvh^EB3U0Z!3ra_I10!24qFM0A3BKz|G9VKJKL z)c<||Um`8AcwEh}KSv<-8XdPxTKuFl5M0Ma4|oH^VAr?{v<^@t?dQWAPyG`*?1DM~ zWLl~E6Rt(#QUC;@4oA8i;Cbq7a1RVEAPCT@2N;`BvQF9sRIo3+G0R%7S%-eda{Sgy z!!X=|jpzs3BnpyRFcA^w`ewqpSswMqS_G-X7>)1}mo2(Mg17=d617Ipz(?Zc7XB!n zBBDOu9-hdKw!iQKKlJ2>lQ`xaQ{$xb#qH=&>aCe5TpG5Lc)qQ;* z4*pOr(G88#l~``aa&=hBy^7d;!||#iE>pGN{Dcq^d@nsCbzhn^|8Y7ybkK(1a8o0e zof{(5dPX>18d(MG6OdvbQ$lXjG>>fpX1NPA;aN@S2lgh>`-l>9TH_47JJ*e~$qEZ6 zk78k5Z?J2WBY!Q~=3QItvFNJZFr)(wBC){YM?jbUUlkb#n1vJVtBC7Vi zj@xmC`X`$vRvQ6Ac9?RLj|oS`o#yso*`y$z2&^&Et?W8A&L+Yi%U-O47bfx*Z!liViABeDitfvYa~Xpa|R2aqKId*QJG|IMHnGI?=4Eg(m+H&S%4Ads5! z8Q2(kMYM#K@SpJUPE6TkL;%n`4q&#vls8~B5tp&AQSvM$&rwpORpj?fTCmzw8sHOc0=&YWO=nc&<4c%BYb@PmAneQ3z7EErNR?%SWkC^l=Jb{Je zs}JoF=*qGgH0z${2TJ6wY{NI%6JQC)VK;0xfRo}{V#zvo3Olfk_`OUSLYACI)}bER zRaixequdiYm7O9-mt48vJmQCl+p+U$dkUHJZ_bmLR$yb1N`kE==&8K!b*Rw9dfLAL zT3PlTN;$>r{JSjO`VlvOy02MxT(D{2-E*N1$e9|S-ze~i$b#D{S+Q)=O^qrr8Fn>} zylKZb5G+oPzJtm(LNR-l2BOIgVVe=7Gcpc^G!^LVp5HZF+af6^4Wkcd*p( z-ML;S)X0uuw-9vAU_%jl-BZ%Ho{m}_QGi>KUdM$In9#RE1c~XbpMzhGlNDHDdPU#A z;AANX+g+Z*mZRIo@9zkuft^#f8Dp4OS2fQCEJTiP9t)7$D4LAcm${VCmJD}nWEV+^ z@N+PP^>`o*0>V$ZD2i8Bw^_grDW_qJlP-w*A%v-#a6dS>C=^6GMew7X z!d_~>?w*~#kNm!mJ{4g2ES?A)B+Wmjp6T$51Ba3?a2Y=a6gpbGg(o0T8U+eh>t`JJ zDQ>oI;K?09mT_wbVG-QbK^))F`JJ7tW8inz$ziKZmIsmUX^0$M1i=+6e!J4Ks=IXU zx10O4cj2z{d&N(CV?0S%HQ>obyo9)M2(E&+a0GaG!0#H0Lm=pB6%KT);=&wvUemiK zui~?b^^bFv^nT~x(^^nBi*on5saG^hgSN+pHb6IWjS?gxr$9a;6Jj5QWIf>Gj0!W4 z_2@mh0K*}Hdt5vrw~LVg{zE#PdAa-S^x>o<9~%uV6747#-d$v{ND{eBpIjoLMO=Fc zDVA+d36>z(xc5@W65@~(PhDfrC1ns+l;I$gyit5T3!|Sd7;vwYw?PYT@_&6F0WR#U z+P!Su^!>K5*KJ&nOO!NSJanvdv=-G+n5kqX@`m#^j8m8`A5xCypBN@&4+K~>H%#s# zFp@8JfFxG;+`~9?&u+kum{>vL`hTPqBvygGhSn#|KaUeN`1!x;o}Ip;T$rY6>sVp1 zkI2A7=!TVP41@r&y1Df+`#Qb;Atki4V)###J7WKz(a`l0m0`Hhn={XdS}@Vx*4 literal 0 HcmV?d00001 diff --git a/SSG/markdown/__pycache__/util.cpython-310.pyc b/SSG/markdown/__pycache__/util.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..8528a58bcec0574e226dc628917a86387217d066 GIT binary patch literal 11595 zcmbVS>ysSURqxl#_Rc=ET0JaTd30oK?Z~TC2z9c2<4JOc@Z6q2G06i`5b4^-tp;6n=d&|j#6;)6fKCsz^kJEy1T zvAYgc%v9gL-S^&e&pqed^ST{QO%*i!-h1)2%1=s~_8;^x{Il?I30HJU*EFG(HK7Y5 z(91gCjk3XavuxsS238|i&gs<03i6Gy@>s(z+x(mh#v6rlLD$B$nkMq!*UJ-PtUM{~ z@|2tuI>fyW^?Dsf%XF{h z$hamBl#h;U(iVo8c~2Jy#q53Uo?d=TJS`53Blq?4F)=TWiQ}jp7mte*_qFolX#0ft zB;K45pAw(On@@~u*EQjY&)nC^F$Vz;+KcY}rjXf1h z;jDLnX0xhLU5$O&jAG?EbA%tz+uq(TiEXKBrKXI}7E6U!JRhs|o9JB)mE$>8x$ShM zry>IMMqByLnzvbxWk+s}K*0oZ+mA%pAcO#Ep{*1s7j&G)0489(=5+$E=`8yV(6E-m zAW97_f1A4$=L=UGeylpqYTc{Uo7sfBI-^U+d+vpz<28k|6nS37d8Hl6eF0a!iVS2F zpSC#p@=QoDFo>|e4@h;a6!oOg^cqscZAf#`NOIfCYemU;pxSN)l$Gd_ ztAy=lT>N}8<_V!dB2-lDR|1*jyvP?aDXa%!W%I3e7|Ud=H%DR$e=9LWkl0mU1|pJi zQmFd1ir1nIljKovD~WrEVJfj8Q;o!^KZEO8T%X1DIb6@-dOpe5r661|m}Er1=1AoDa&F80=esnHISe8p>Sc~N3l!k`C6h{ReCMF)t+ zNiKbv_p1stNsPcx@`0?$COS2|xSp5|ua(%%FsdlO6({+y9k<$aU#A_0d%83mTdu~0 z%J+KaqPRokiqpAbk=U`kgIy>+yd(-F2H@wwS5Kg-lH3*u8-A!8AL?e;-VD0k?K}1J zUE|59YtMc4+?g-D`E;>tdW}}Mu-&~=?~2XyXe!jZ-FkPkj(X838f~aB?O(sew}1OF zo=rUe```XmdjHXn!Q8QicPH|@vO@BZ{a`>e>big{nnw|98!#iAIW-9-Z3afeysPP2 z)54P$Sa-D@Z6hZvk$X>v>ddbcjSpz}2V}4m0efFh>;_~{c(K<#l&P!|NC0^Sm!hJs zis)C&CuY=%lF6P-yMD78Cgaexp^5{4U0p?|@@)Uv&44D8&?l_gPS&9ty{goC@Oy)kSb# zjl7yX&sBQ7Bzo|qm^-;vNAKRaZ9fQ{b?GSC2)6*&Z)W<$dHqF)l;s6CnlK*d-_`!mip>X*&-Y9nfOj-s+tGiZf0KY& zDRIZ$~vs4Q!t>LdlQxUw0@Eg2N8#NJNn9wipl65Dl2Rl066yO%kD zV|l6;CHYNsu0=%?3Jom6{(`~;E7X^PHw&ctB5KKj0TwQGl(ei#6qYfg8~Eq{XFAvY z+@M$K0(7hwL>Eh`2&ln}K_>=_B8zHM~Euqdhbqf%luJ3woD6n_HTCLRkIh z9G;&Rc|7atv#}+{cC@2_@eVW^B*XR$$mk7tF*0&I2QCopHAUMcf=;1_^)Q5$^@6r6 zIZIUtH|#C#M=x@scBKLaSKGmSwgHrk+o~BkYpP8~+PMa%^#&41wI$tV*px3(`#dHZ z^qx=q_90>L3VQCDv>C?36FZ7@M&vkOAru#t3e`L&o_G9ehEo+*-wm6tq{iqaMrklN zPqLC&gjO+^STQgwn;jV?dZ%o_hU(9(K{My7w34#X3=_RsHp6N)(a)({VAr+oi3P7o zXc2AX5W&tnPbz>c_@Eee7vvlY9_9^37sfglgm5sE!T@pXn;cWRJUxnYd5Rg5Q7@G? 
z7CDx-X`c8BfHJJNNRS>!QOHf|mTv1aD5*vlmmet~%_HiGR24}nOW`F{gubIeb_$Ry z4yGO%cQs-1yTkm>6c)eB@w-p)yPU|=JCfDnSa)(Ug2N-7N+@LML;b04?kU*p7eCZ5 zbgid?7?q2t)Z!P(6hr6&c%i6TOWVIByT5`}B1gJC)R9$M%4RBn=wZ z=be^R`KFV9`aNK%vJZ9taD#oE2X85-sC94^oC z8yL&WESQr<9-Omv<0I>*D#rT?u8|)tm3c=dmX`Sl+Txye47}SUZW!GwH+i=?B;eQ` z`>l!$#5Mvk*uiJcoujZ04wBz!H=MvnI8pk5G$oWDuks+!g_Ulz|IO`;+0e2E68cyZ zkrLE~+aKWah(7?lh)|z3)HZH=mPKND30E|OLTZTTHHzgC$Ftv%OkR$|hF@7_#nnA~ zIa6WVb-z+~qI%d4ghP81R=(O+a50qw2bI*86eZ%=O#n^`uGAG*Vy`X3A z##~o~71!NwS?UhP{*bUE<6&#GyZGInWteEcL>-T!=r4mjNE2Z^j6XP(OU4mS5oV!U z8F8VAKy)#G3#jZsh+U*PR^eMx7-KX93zrP4>X*9!*q-0ClPOIu*>!K|yfU4vIg;#rDnCrgeU^XZ1AUCF=Hh6#dmR6S01+ zY?1ejJG6%w!#yu-F^>C~D2NH%Z7~V@OeNzl$Bke$_M&?CZQgP0%T)sSTO_&g*U~@_ zdnkeo*PNHvURie1_#|@Tx)(d!(jn#IRbmDJL8oYPT_TudHwl7m_C>^WBQ1w zk)YDGKqF71KyX7kQ-7rEB&7{AsU*RTSp{_HbZ|yP<#k?RzHd< zQ{|#3Gqfw+8_Sm$7GM7I^3}zgz<~rfF*jvr*YxVw(C`ynY$R;Gdwj6!U1Y70sm*?* zkH>Ja7(b6nCdNC)j=5tA{d*P(bZo@%Xf~{SK=3a77>G16gUfcCz;?>TKb?K0Gnb?{VHzi8z_r^6Ae0Hof>wDt?QK-=^Z%so2Gpb(~iG33`0M2>G`)>#mYm&?G6jJ;U?Q_al_4@Sm%4O$vHtII# z3CP7K_kz+_&{Er>(`v5=ex;XhV8!JHNcgeUVR?d&m1(XF`g!MeT9xWH%iC?(9-bL5 zN=R@qw^Qz|^!l6e! zrWrpYC_-M@nG0mI7T43<5@Z2X$+(n(Mcf%WNJb;{U=v6VOC~CS!lx2!YH7M*?qME| z;Ip|qJv=F|CE?{~Sdxw+U!MRI42s)P=M|g#4s8G?J4yflP5F6efBN!_`;d{c6c9KitJvJ6Hz3Ef zP`IXuK9~_7M`_AE_V6DNvA?ptl+OD9EIKb_fzC{L&+DIVM~C?B{5q`sqcoA*_UdG8-qR=u+puWyNMlWd9OhkX$9M=ob|b%I9@ogzePrs|TgW3L zcZ?t|9>dsk+}<0xVIL=np%H+SIqBo-hnST`2Jr_R7^!}rUeG!dJ5?}PHVcx8GbiPpxt%n07?Ba0CZ=DCgr9*6B2`X z!o#Qt{UJ}e!NJ5m{SjrRjFql^;qAL~Z{ICmL}Y+-UZe_RXb6|n704&u#L&bdAsiCT z0a}n8O2fPE(?lLx;o(uaNn1zchI$cdv|*x)DCisrIoxBJ{19ax8iQR&&$R0Vq~#HI zUW}aXL+b(RbV5+fb)RF+NY*_|9+HnR>@d{)EjWr~Fw*1*^&O!XA+^u$rF`oK5XA9? 
zv7-;|UZgWvnhf;_BNP3O`WxI+r9`r${usq`#H=5n(mmT})?RzD-`3@*xBK`?n7TTI zILyG?cTd8j0WYEw#F&MjWj)7hRW+KI!keC=WDHZ37&(^d*?kZo-qE?1ZP^fQ2=+(Q zX7|L;K)FJs%6?SuiyGz55WNyV$BCai21T$O*MpScB#u%qkRX~5tvE+t=LoE_MZ$fSu=O!%{Ksj5MUJYNQd zl(gg3;2*%RA2`A7nCo;vJpr zIgt;;(myyNok>R#e+NWUf{|rqt&@?7D3s(=V2aYZyD9becz1&+MV8da7jRfK%?Exs zO#jIEsC(#Umb)0RX+%$BEbD0|*&H4~9zY{vZQ#TOUnk)BPgw8az?hB!ar}S;8C}JE z_dA0%(|#MK2Ynrv?N#SvBKrZ!hCj~07D_+r;N7k8u@`L=$ZK}0gHII{<$>j$7F-mc zzNpK-0wIdhZ_f&8m8-be@1_-IE|1Rg2iK7Wa#Rcx`$FI~)`fRb5{1d4<+W}<$}O>@ zKWuE$&TFt1>TsxMiQ>q@HC@ z*e3oHg~ukR*)IgumeHqZtX(TUq25L{F_h#}LUn_laCnQahxn77ND=vxasGq{C)N>v zuAykd;!`Ksl9*qT*()Euvi#+RFS*N$Us+ss7nc`bSzK9LEt{)rub0h56vvlVmX}r* zhm)u7jhlKVOlw3+=d1yj2DjDhYAz+ Pgpr%cPv`Le?5z2}iQyd= literal 0 HcmV?d00001 diff --git a/SSG/markdown/blockparser.py b/SSG/markdown/blockparser.py new file mode 100644 index 0000000..b0ca4b1 --- /dev/null +++ b/SSG/markdown/blockparser.py @@ -0,0 +1,119 @@ +""" +Python Markdown + +A Python implementation of John Gruber's Markdown. + +Documentation: https://python-markdown.github.io/ +GitHub: https://github.com/Python-Markdown/markdown/ +PyPI: https://pypi.org/project/Markdown/ + +Started by Manfred Stienstra (http://www.dwerg.net/). +Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +Currently maintained by Waylan Limberg (https://github.com/waylan), +Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +License: BSD (see LICENSE.md for details). +""" + +import xml.etree.ElementTree as etree +from . import util + + +class State(list): + """ Track the current and nested state of the parser. + + This utility class is used to track the state of the BlockParser and + support multiple levels if nesting. It's just a simple API wrapped around + a list. 
Each time a state is set, that state is appended to the end of the + list. Each time a state is reset, that state is removed from the end of + the list. + + Therefore, each time a state is set for a nested block, that state must be + reset when we back out of that level of nesting or the state could be + corrupted. + + While all the methods of a list object are available, only the three + defined below need be used. + + """ + + def set(self, state): + """ Set a new state. """ + self.append(state) + + def reset(self): + """ Step back one step in nested state. """ + self.pop() + + def isstate(self, state): + """ Test that top (current) level is of given state. """ + if len(self): + return self[-1] == state + else: + return False + + +class BlockParser: + """ Parse Markdown blocks into an ElementTree object. + + A wrapper class that stitches the various BlockProcessors together, + looping through them and creating an ElementTree object. + """ + + def __init__(self, md): + self.blockprocessors = util.Registry() + self.state = State() + self.md = md + + def parseDocument(self, lines): + """ Parse a markdown document into an ElementTree. + + Given a list of lines, an ElementTree object (not just a parent + Element) is created and the root element is passed to the parser + as the parent. The ElementTree object is returned. + + This should only be called on an entire document, not pieces. + + """ + # Create a ElementTree from the lines + self.root = etree.Element(self.md.doc_tag) + self.parseChunk(self.root, '\n'.join(lines)) + return etree.ElementTree(self.root) + + def parseChunk(self, parent, text): + """ Parse a chunk of markdown text and attach to given etree node. + + While the ``text`` argument is generally assumed to contain multiple + blocks which will be split on blank lines, it could contain only one + block. Generally, this method would be called by extensions when + block parsing is required. + + The ``parent`` etree Element passed in is altered in place. 
+ Nothing is returned. + + """ + self.parseBlocks(parent, text.split('\n\n')) + + def parseBlocks(self, parent, blocks): + """ Process blocks of markdown text and attach to given etree node. + + Given a list of ``blocks``, each blockprocessor is stepped through + until there are no blocks left. While an extension could potentially + call this method directly, it's generally expected to be used + internally. + + This is a public method as an extension may need to add/alter + additional BlockProcessors which call this method to recursively + parse a nested block. + + """ + while blocks: + for processor in self.blockprocessors: + if processor.test(parent, blocks[0]): + if processor.run(parent, blocks) is not False: + # run returns True or None + break diff --git a/SSG/markdown/blockprocessors.py b/SSG/markdown/blockprocessors.py new file mode 100644 index 0000000..3d0ff86 --- /dev/null +++ b/SSG/markdown/blockprocessors.py @@ -0,0 +1,623 @@ +""" +Python Markdown + +A Python implementation of John Gruber's Markdown. + +Documentation: https://python-markdown.github.io/ +GitHub: https://github.com/Python-Markdown/markdown/ +PyPI: https://pypi.org/project/Markdown/ + +Started by Manfred Stienstra (http://www.dwerg.net/). +Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +Currently maintained by Waylan Limberg (https://github.com/waylan), +Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +License: BSD (see LICENSE.md for details). + +CORE MARKDOWN BLOCKPARSER +=========================================================================== + +This parser handles basic parsing of Markdown blocks. 
It doesn't concern +itself with inline elements such as **bold** or *italics*, but rather just +catches blocks, lists, quotes, etc. + +The BlockParser is made up of a bunch of BlockProcessors, each handling a +different type of block. Extensions may add/replace/remove BlockProcessors +as they need to alter how markdown blocks are parsed. +""" + +import logging +import re +import xml.etree.ElementTree as etree +from . import util +from .blockparser import BlockParser + +logger = logging.getLogger('MARKDOWN') + + +def build_block_parser(md, **kwargs): + """ Build the default block parser used by Markdown. """ + parser = BlockParser(md) + parser.blockprocessors.register(EmptyBlockProcessor(parser), 'empty', 100) + parser.blockprocessors.register(ListIndentProcessor(parser), 'indent', 90) + parser.blockprocessors.register(CodeBlockProcessor(parser), 'code', 80) + parser.blockprocessors.register(HashHeaderProcessor(parser), 'hashheader', 70) + parser.blockprocessors.register(SetextHeaderProcessor(parser), 'setextheader', 60) + parser.blockprocessors.register(HRProcessor(parser), 'hr', 50) + parser.blockprocessors.register(OListProcessor(parser), 'olist', 40) + parser.blockprocessors.register(UListProcessor(parser), 'ulist', 30) + parser.blockprocessors.register(BlockQuoteProcessor(parser), 'quote', 20) + parser.blockprocessors.register(ReferenceProcessor(parser), 'reference', 15) + parser.blockprocessors.register(ParagraphProcessor(parser), 'paragraph', 10) + return parser + + +class BlockProcessor: + """ Base class for block processors. + + Each subclass will provide the methods below to work with the source and + tree. Each processor will need to define it's own ``test`` and ``run`` + methods. The ``test`` method should return True or False, to indicate + whether the current block should be processed by this processor. If the + test passes, the parser will call the processors ``run`` method. 
+ + """ + + def __init__(self, parser): + self.parser = parser + self.tab_length = parser.md.tab_length + + def lastChild(self, parent): + """ Return the last child of an etree element. """ + if len(parent): + return parent[-1] + else: + return None + + def detab(self, text, length=None): + """ Remove a tab from the front of each line of the given text. """ + if length is None: + length = self.tab_length + newtext = [] + lines = text.split('\n') + for line in lines: + if line.startswith(' ' * length): + newtext.append(line[length:]) + elif not line.strip(): + newtext.append('') + else: + break + return '\n'.join(newtext), '\n'.join(lines[len(newtext):]) + + def looseDetab(self, text, level=1): + """ Remove a tab from front of lines but allowing dedented lines. """ + lines = text.split('\n') + for i in range(len(lines)): + if lines[i].startswith(' '*self.tab_length*level): + lines[i] = lines[i][self.tab_length*level:] + return '\n'.join(lines) + + def test(self, parent, block): + """ Test for block type. Must be overridden by subclasses. + + As the parser loops through processors, it will call the ``test`` + method on each to determine if the given block of text is of that + type. This method must return a boolean ``True`` or ``False``. The + actual method of testing is left to the needs of that particular + block type. It could be as simple as ``block.startswith(some_string)`` + or a complex regular expression. As the block type may be different + depending on the parent of the block (i.e. inside a list), the parent + etree element is also provided and may be used as part of the test. + + Keywords: + + * ``parent``: A etree element which will be the parent of the block. + * ``block``: A block of text from the source which has been split at + blank lines. + """ + pass # pragma: no cover + + def run(self, parent, blocks): + """ Run processor. Must be overridden by subclasses. 
+ + When the parser determines the appropriate type of a block, the parser + will call the corresponding processor's ``run`` method. This method + should parse the individual lines of the block and append them to + the etree. + + Note that both the ``parent`` and ``etree`` keywords are pointers + to instances of the objects which should be edited in place. Each + processor must make changes to the existing objects as there is no + mechanism to return new/different objects to replace them. + + This means that this method should be adding SubElements or adding text + to the parent, and should remove (``pop``) or add (``insert``) items to + the list of blocks. + + Keywords: + + * ``parent``: A etree element which is the parent of the current block. + * ``blocks``: A list of all remaining blocks of the document. + """ + pass # pragma: no cover + + +class ListIndentProcessor(BlockProcessor): + """ Process children of list items. + + Example: + * a list item + process this part + + or this part + + """ + + ITEM_TYPES = ['li'] + LIST_TYPES = ['ul', 'ol'] + + def __init__(self, *args): + super().__init__(*args) + self.INDENT_RE = re.compile(r'^(([ ]{%s})+)' % self.tab_length) + + def test(self, parent, block): + return block.startswith(' '*self.tab_length) and \ + not self.parser.state.isstate('detabbed') and \ + (parent.tag in self.ITEM_TYPES or + (len(parent) and parent[-1] is not None and + (parent[-1].tag in self.LIST_TYPES))) + + def run(self, parent, blocks): + block = blocks.pop(0) + level, sibling = self.get_level(parent, block) + block = self.looseDetab(block, level) + + self.parser.state.set('detabbed') + if parent.tag in self.ITEM_TYPES: + # It's possible that this parent has a 'ul' or 'ol' child list + # with a member. If that is the case, then that should be the + # parent. 
This is intended to catch the edge case of an indented + # list whose first member was parsed previous to this point + # see OListProcessor + if len(parent) and parent[-1].tag in self.LIST_TYPES: + self.parser.parseBlocks(parent[-1], [block]) + else: + # The parent is already a li. Just parse the child block. + self.parser.parseBlocks(parent, [block]) + elif sibling.tag in self.ITEM_TYPES: + # The sibling is a li. Use it as parent. + self.parser.parseBlocks(sibling, [block]) + elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES: + # The parent is a list (``ol`` or ``ul``) which has children. + # Assume the last child li is the parent of this block. + if sibling[-1].text: + # If the parent li has text, that text needs to be moved to a p + # The p must be 'inserted' at beginning of list in the event + # that other children already exist i.e.; a nested sublist. + p = etree.Element('p') + p.text = sibling[-1].text + sibling[-1].text = '' + sibling[-1].insert(0, p) + self.parser.parseChunk(sibling[-1], block) + else: + self.create_item(sibling, block) + self.parser.state.reset() + + def create_item(self, parent, block): + """ Create a new li and parse the block with it as the parent. """ + li = etree.SubElement(parent, 'li') + self.parser.parseBlocks(li, [block]) + + def get_level(self, parent, block): + """ Get level of indent based on list level. """ + # Get indent level + m = self.INDENT_RE.match(block) + if m: + indent_level = len(m.group(1))/self.tab_length + else: + indent_level = 0 + if self.parser.state.isstate('list'): + # We're in a tightlist - so we already are at correct parent. + level = 1 + else: + # We're in a looselist - so we need to find parent. + level = 0 + # Step through children of tree to find matching indent level. 
class CodeBlockProcessor(BlockProcessor):
    """ Process indented code blocks. """

    def test(self, parent, block):
        # A code block is any block indented by one tab-width.
        return block.startswith(' ' * self.tab_length)

    def run(self, parent, blocks):
        sibling = self.lastChild(parent)
        block = blocks.pop(0)
        leftover = ''
        is_continuation = (
            sibling is not None and sibling.tag == "pre"
            and len(sibling) and sibling[0].tag == "code"
        )
        if is_continuation:
            # The previous block was a code block. Blank lines do not end
            # code blocks, so append to it, restoring the linebreaks that
            # the split into blocks removed.
            code = sibling[0]
            block, leftover = self.detab(block)
            code.text = util.AtomicString(
                '{}\n{}\n'.format(code.text, util.code_escape(block.rstrip()))
            )
        else:
            # Start a fresh <pre><code> pair and insert the escaped text.
            pre = etree.SubElement(parent, 'pre')
            code = etree.SubElement(pre, 'code')
            block, leftover = self.detab(block)
            code.text = util.AtomicString('%s\n' % util.code_escape(block.rstrip()))
        if leftover:
            # Unindented line(s) followed the indented ones; requeue them as
            # the next block for future processing.
            blocks.insert(0, leftover)
+ self.parser.parseBlocks(parent, [before]) + # Remove ``> `` from beginning of each line. + block = '\n'.join( + [self.clean(line) for line in block[m.start():].split('\n')] + ) + sibling = self.lastChild(parent) + if sibling is not None and sibling.tag == "blockquote": + # Previous block was a blockquote so set that as this blocks parent + quote = sibling + else: + # This is a new blockquote. Create a new parent element. + quote = etree.SubElement(parent, 'blockquote') + # Recursively parse block with blockquote as parent. + # change parser state so blockquotes embedded in lists use p tags + self.parser.state.set('blockquote') + self.parser.parseChunk(quote, block) + self.parser.state.reset() + + def clean(self, line): + """ Remove ``>`` from beginning of a line. """ + m = self.RE.match(line) + if line.strip() == ">": + return "" + elif m: + return m.group(2) + else: + return line + + +class OListProcessor(BlockProcessor): + """ Process ordered list blocks. """ + + TAG = 'ol' + # The integer (python string) with which the lists starts (default=1) + # Eg: If list is initialized as) + # 3. Item + # The ol tag will get starts="3" attribute + STARTSWITH = '1' + # Lazy ol - ignore startswith + LAZY_OL = True + # List of allowed sibling tags. + SIBLING_TAGS = ['ol', 'ul'] + + def __init__(self, parser): + super().__init__(parser) + # Detect an item (``1. item``). ``group(1)`` contains contents of item. + self.RE = re.compile(r'^[ ]{0,%d}\d+\.[ ]+(.*)' % (self.tab_length - 1)) + # Detect items on secondary lines. they can be of either list type. + self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.)|[*+-])[ ]+(.*)' % + (self.tab_length - 1)) + # Detect indented (nested) items of either type + self.INDENT_RE = re.compile(r'^[ ]{%d,%d}((\d+\.)|[*+-])[ ]+.*' % + (self.tab_length, self.tab_length * 2 - 1)) + + def test(self, parent, block): + return bool(self.RE.match(block)) + + def run(self, parent, blocks): + # Check fr multiple items in one block. 
+ items = self.get_items(blocks.pop(0)) + sibling = self.lastChild(parent) + + if sibling is not None and sibling.tag in self.SIBLING_TAGS: + # Previous block was a list item, so set that as parent + lst = sibling + # make sure previous item is in a p- if the item has text, + # then it isn't in a p + if lst[-1].text: + # since it's possible there are other children for this + # sibling, we can't just SubElement the p, we need to + # insert it as the first item. + p = etree.Element('p') + p.text = lst[-1].text + lst[-1].text = '' + lst[-1].insert(0, p) + # if the last item has a tail, then the tail needs to be put in a p + # likely only when a header is not followed by a blank line + lch = self.lastChild(lst[-1]) + if lch is not None and lch.tail: + p = etree.SubElement(lst[-1], 'p') + p.text = lch.tail.lstrip() + lch.tail = '' + + # parse first block differently as it gets wrapped in a p. + li = etree.SubElement(lst, 'li') + self.parser.state.set('looselist') + firstitem = items.pop(0) + self.parser.parseBlocks(li, [firstitem]) + self.parser.state.reset() + elif parent.tag in ['ol', 'ul']: + # this catches the edge case of a multi-item indented list whose + # first item is in a blank parent-list item: + # * * subitem1 + # * subitem2 + # see also ListIndentProcessor + lst = parent + else: + # This is a new list so create parent with appropriate tag. + lst = etree.SubElement(parent, self.TAG) + # Check if a custom start integer is set + if not self.LAZY_OL and self.STARTSWITH != '1': + lst.attrib['start'] = self.STARTSWITH + + self.parser.state.set('list') + # Loop through items in block, recursively parsing each with the + # appropriate parent. + for item in items: + if item.startswith(' '*self.tab_length): + # Item is indented. Parse with last item as parent + self.parser.parseBlocks(lst[-1], [item]) + else: + # New item. 
class HashHeaderProcessor(BlockProcessor):
    """ Process Hash Headers (``# H1`` through ``###### H6``). """

    # Detect a header at the start of any line in the block.
    # Named groups: 'level' captures the run of 1-6 '#' characters and
    # 'header' the (possibly backslash-escaped) title text; trailing '#'
    # characters are consumed but not captured. The group names were lost
    # in this vendored copy (``(?P#{1,6})`` is invalid ``re`` syntax and
    # ``run()`` reads ``m.group('level')``/``m.group('header')``), so they
    # are restored here.
    RE = re.compile(r'(?:^|\n)(?P<level>#{1,6})(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)')

    def test(self, parent, block):
        return bool(self.RE.search(block))
class HRProcessor(BlockProcessor):
    """ Process Horizontal Rules (``---``, ``___``, ``***`` and variants). """

    # Python's re module doesn't officially support atomic grouping, but it
    # can be faked with a lookahead that captures into a named group plus a
    # backreference to that group. See https://stackoverflow.com/a/13577411/866026
    # The group name was lost in this vendored copy, leaving the
    # ``(?P=atomicgroup)`` backreference pointing at an undefined group
    # (an invalid pattern); ``(?P<atomicgroup>...)`` is restored here.
    RE = r'^[ ]{0,3}(?=(?P<atomicgroup>(-+[ ]{0,2}){3,}|(_+[ ]{0,2}){3,}|(\*+[ ]{0,2}){3,}))(?P=atomicgroup)[ ]*$'
    # Detect hr on any line of a block.
    SEARCH_RE = re.compile(RE, re.MULTILINE)

    def test(self, parent, block):
        m = self.SEARCH_RE.search(block)
        if m:
            # Save the match on the instance so run() can reuse it
            # without searching the block a second time.
            self.match = m
            return True
        return False

    def run(self, parent, blocks):
        block = blocks.pop(0)
        match = self.match
        # Lines before the hr must be parsed first, recursively.
        prelines = block[:match.start()].rstrip('\n')
        if prelines:
            self.parser.parseBlocks(parent, [prelines])
        # Create the hr element itself.
        etree.SubElement(parent, 'hr')
        # Lines after the hr are requeued for later parsing.
        postlines = block[match.end():].lstrip('\n')
        if postlines:
            blocks.insert(0, postlines)
""" + RE = re.compile( + r'^[ ]{0,3}\[([^\[\]]*)\]:[ ]*\n?[ ]*([^\s]+)[ ]*(?:\n[ ]*)?((["\'])(.*)\4[ ]*|\((.*)\)[ ]*)?$', re.MULTILINE + ) + + def test(self, parent, block): + return True + + def run(self, parent, blocks): + block = blocks.pop(0) + m = self.RE.search(block) + if m: + id = m.group(1).strip().lower() + link = m.group(2).lstrip('<').rstrip('>') + title = m.group(5) or m.group(6) + self.parser.md.references[id] = (link, title) + if block[m.end():].strip(): + # Add any content after match back to blocks as separate block + blocks.insert(0, block[m.end():].lstrip('\n')) + if block[:m.start()].strip(): + # Add any content before match back to blocks as separate block + blocks.insert(0, block[:m.start()].rstrip('\n')) + return True + # No match. Restore block. + blocks.insert(0, block) + return False + + +class ParagraphProcessor(BlockProcessor): + """ Process Paragraph blocks. """ + + def test(self, parent, block): + return True + + def run(self, parent, blocks): + block = blocks.pop(0) + if block.strip(): + # Not a blank block. Add to parent, otherwise throw it away. + if self.parser.state.isstate('list'): + # The parent is a tight-list. + # + # Check for any children. This will likely only happen in a + # tight-list when a header isn't followed by a blank line. + # For example: + # + # * # Header + # Line 2 of list item - not part of header. + sibling = self.lastChild(parent) + if sibling is not None: + # Insetrt after sibling. 
+ if sibling.tail: + sibling.tail = '{}\n{}'.format(sibling.tail, block) + else: + sibling.tail = '\n%s' % block + else: + # Append to parent.text + if parent.text: + parent.text = '{}\n{}'.format(parent.text, block) + else: + parent.text = block.lstrip() + else: + # Create a regular paragraph + p = etree.SubElement(parent, 'p') + p.text = block.lstrip() diff --git a/SSG/markdown/core.py b/SSG/markdown/core.py new file mode 100644 index 0000000..f6a171c --- /dev/null +++ b/SSG/markdown/core.py @@ -0,0 +1,407 @@ +""" +Python Markdown + +A Python implementation of John Gruber's Markdown. + +Documentation: https://python-markdown.github.io/ +GitHub: https://github.com/Python-Markdown/markdown/ +PyPI: https://pypi.org/project/Markdown/ + +Started by Manfred Stienstra (http://www.dwerg.net/). +Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +Currently maintained by Waylan Limberg (https://github.com/waylan), +Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +License: BSD (see LICENSE.md for details). +""" + +import codecs +import sys +import logging +import importlib +from . 
import util +from .preprocessors import build_preprocessors +from .blockprocessors import build_block_parser +from .treeprocessors import build_treeprocessors +from .inlinepatterns import build_inlinepatterns +from .postprocessors import build_postprocessors +from .extensions import Extension +from .serializers import to_html_string, to_xhtml_string + +__all__ = ['Markdown', 'markdown', 'markdownFromFile'] + + +logger = logging.getLogger('MARKDOWN') + + +class Markdown: + """Convert Markdown to HTML.""" + + doc_tag = "div" # Element used to wrap document - later removed + + output_formats = { + 'html': to_html_string, + 'xhtml': to_xhtml_string, + } + + def __init__(self, **kwargs): + """ + Creates a new Markdown instance. + + Keyword arguments: + + * extensions: A list of extensions. + If an item is an instance of a subclass of `markdown.extension.Extension`, the instance will be used + as-is. If an item is of type string, first an entry point will be loaded. If that fails, the string is + assumed to use Python dot notation (`path.to.module:ClassName`) to load a markdown.Extension subclass. If + no class is specified, then a `makeExtension` function is called within the specified module. + * extension_configs: Configuration settings for extensions. + * output_format: Format of output. Supported formats are: + * "xhtml": Outputs XHTML style tags. Default. + * "html": Outputs HTML style tags. + * tab_length: Length of tabs in the source. Default: 4 + + """ + + self.tab_length = kwargs.get('tab_length', 4) + + self.ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']', + '(', ')', '>', '#', '+', '-', '.', '!'] + + self.block_level_elements = [ + # Elements which are invalid to wrap in a `

` tag. + # See https://w3c.github.io/html/grouping-content.html#the-p-element + 'address', 'article', 'aside', 'blockquote', 'details', 'div', 'dl', + 'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3', + 'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'main', 'menu', 'nav', 'ol', + 'p', 'pre', 'section', 'table', 'ul', + # Other elements which Markdown should not be mucking up the contents of. + 'canvas', 'colgroup', 'dd', 'body', 'dt', 'group', 'iframe', 'li', 'legend', + 'math', 'map', 'noscript', 'output', 'object', 'option', 'progress', 'script', + 'style', 'summary', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'video' + ] + + self.registeredExtensions = [] + self.docType = "" + self.stripTopLevelTags = True + + self.build_parser() + + self.references = {} + self.htmlStash = util.HtmlStash() + self.registerExtensions(extensions=kwargs.get('extensions', []), + configs=kwargs.get('extension_configs', {})) + self.set_output_format(kwargs.get('output_format', 'xhtml')) + self.reset() + + def build_parser(self): + """ Build the parser from the various parts. """ + self.preprocessors = build_preprocessors(self) + self.parser = build_block_parser(self) + self.inlinePatterns = build_inlinepatterns(self) + self.treeprocessors = build_treeprocessors(self) + self.postprocessors = build_postprocessors(self) + return self + + def registerExtensions(self, extensions, configs): + """ + Register extensions with this instance of Markdown. + + Keyword arguments: + + * extensions: A list of extensions, which can either + be strings or objects. + * configs: A dictionary mapping extension names to config options. + + """ + for ext in extensions: + if isinstance(ext, str): + ext = self.build_extension(ext, configs.get(ext, {})) + if isinstance(ext, Extension): + ext.extendMarkdown(self) + logger.debug( + 'Successfully loaded extension "%s.%s".' 
+ % (ext.__class__.__module__, ext.__class__.__name__) + ) + elif ext is not None: + raise TypeError( + 'Extension "{}.{}" must be of type: "{}.{}"'.format( + ext.__class__.__module__, ext.__class__.__name__, + Extension.__module__, Extension.__name__ + ) + ) + return self + + def build_extension(self, ext_name, configs): + """ + Build extension from a string name, then return an instance. + + First attempt to load an entry point. The string name must be registered as an entry point in the + `markdown.extensions` group which points to a subclass of the `markdown.extensions.Extension` class. + If multiple distributions have registered the same name, the first one found is returned. + + If no entry point is found, assume dot notation (`path.to.module:ClassName`). Load the specified class and + return an instance. If no class is specified, import the module and call a `makeExtension` function and return + the Extension instance returned by that function. + """ + configs = dict(configs) + + entry_points = [ep for ep in util.get_installed_extensions() if ep.name == ext_name] + if entry_points: + ext = entry_points[0].load() + return ext(**configs) + + # Get class name (if provided): `path.to.module:ClassName` + ext_name, class_name = ext_name.split(':', 1) if ':' in ext_name else (ext_name, '') + + try: + module = importlib.import_module(ext_name) + logger.debug( + 'Successfully imported extension module "%s".' % ext_name + ) + except ImportError as e: + message = 'Failed loading extension "%s".' % ext_name + e.args = (message,) + e.args[1:] + raise + + if class_name: + # Load given class name from module. + return getattr(module, class_name)(**configs) + else: + # Expect makeExtension() function to return a class. 
+ try: + return module.makeExtension(**configs) + except AttributeError as e: + message = e.args[0] + message = "Failed to initiate extension " \ + "'%s': %s" % (ext_name, message) + e.args = (message,) + e.args[1:] + raise + + def registerExtension(self, extension): + """ This gets called by the extension """ + self.registeredExtensions.append(extension) + return self + + def reset(self): + """ + Resets all state variables so that we can start with a new text. + """ + self.htmlStash.reset() + self.references.clear() + + for extension in self.registeredExtensions: + if hasattr(extension, 'reset'): + extension.reset() + + return self + + def set_output_format(self, format): + """ Set the output format for the class instance. """ + self.output_format = format.lower().rstrip('145') # ignore num + try: + self.serializer = self.output_formats[self.output_format] + except KeyError as e: + valid_formats = list(self.output_formats.keys()) + valid_formats.sort() + message = 'Invalid Output Format: "%s". Use one of %s.' \ + % (self.output_format, + '"' + '", "'.join(valid_formats) + '"') + e.args = (message,) + e.args[1:] + raise + return self + + def is_block_level(self, tag): + """Check if the tag is a block level HTML tag.""" + if isinstance(tag, str): + return tag.lower().rstrip('/') in self.block_level_elements + # Some ElementTree tags are not strings, so return False. + return False + + def convert(self, source): + """ + Convert markdown to serialized XHTML or HTML. + + Keyword arguments: + + * source: Source text as a Unicode string. + + Markdown processing takes place in five steps: + + 1. A bunch of "preprocessors" munge the input text. + 2. BlockParser() parses the high-level structural elements of the + pre-processed text into an ElementTree. + 3. A bunch of "treeprocessors" are run against the ElementTree. One + such treeprocessor runs InlinePatterns against the ElementTree, + detecting inline markup. + 4. 
Some post-processors are run against the text after the ElementTree + has been serialized into text. + 5. The output is written to a string. + + """ + + # Fixup the source text + if not source.strip(): + return '' # a blank unicode string + + try: + source = str(source) + except UnicodeDecodeError as e: # pragma: no cover + # Customise error message while maintaining original trackback + e.reason += '. -- Note: Markdown only accepts unicode input!' + raise + + # Split into lines and run the line preprocessors. + self.lines = source.split("\n") + for prep in self.preprocessors: + self.lines = prep.run(self.lines) + + # Parse the high-level elements. + root = self.parser.parseDocument(self.lines).getroot() + + # Run the tree-processors + for treeprocessor in self.treeprocessors: + newRoot = treeprocessor.run(root) + if newRoot is not None: + root = newRoot + + # Serialize _properly_. Strip top-level tags. + output = self.serializer(root) + if self.stripTopLevelTags: + try: + start = output.index( + '<%s>' % self.doc_tag) + len(self.doc_tag) + 2 + end = output.rindex('' % self.doc_tag) + output = output[start:end].strip() + except ValueError as e: # pragma: no cover + if output.strip().endswith('<%s />' % self.doc_tag): + # We have an empty document + output = '' + else: + # We have a serious problem + raise ValueError('Markdown failed to strip top-level ' + 'tags. Document=%r' % output.strip()) from e + + # Run the text post-processors + for pp in self.postprocessors: + output = pp.run(output) + + return output.strip() + + def convertFile(self, input=None, output=None, encoding=None): + """Converts a markdown file and returns the HTML as a unicode string. + + Decodes the file using the provided encoding (defaults to utf-8), + passes the file content to markdown, and outputs the html to either + the provided stream or the file with provided name, using the same + encoding as the source file. The 'xmlcharrefreplace' error handler is + used when encoding the output. 
+ + **Note:** This is the only place that decoding and encoding of unicode + takes place in Python-Markdown. (All other code is unicode-in / + unicode-out.) + + Keyword arguments: + + * input: File object or path. Reads from stdin if `None`. + * output: File object or path. Writes to stdout if `None`. + * encoding: Encoding of input and output files. Defaults to utf-8. + + """ + + encoding = encoding or "utf-8" + + # Read the source + if input: + if isinstance(input, str): + input_file = codecs.open(input, mode="r", encoding=encoding) + else: + input_file = codecs.getreader(encoding)(input) + text = input_file.read() + input_file.close() + else: + text = sys.stdin.read() + if not isinstance(text, str): # pragma: no cover + text = text.decode(encoding) + + text = text.lstrip('\ufeff') # remove the byte-order mark + + # Convert + html = self.convert(text) + + # Write to file or stdout + if output: + if isinstance(output, str): + output_file = codecs.open(output, "w", + encoding=encoding, + errors="xmlcharrefreplace") + output_file.write(html) + output_file.close() + else: + writer = codecs.getwriter(encoding) + output_file = writer(output, errors="xmlcharrefreplace") + output_file.write(html) + # Don't close here. User may want to write more. + else: + # Encode manually and write bytes to stdout. + html = html.encode(encoding, "xmlcharrefreplace") + try: + # Write bytes directly to buffer (Python 3). + sys.stdout.buffer.write(html) + except AttributeError: # pragma: no cover + # Probably Python 2, which works with bytes by default. + sys.stdout.write(html) + + return self + + +""" +EXPORTED FUNCTIONS +============================================================================= + +Those are the two functions we really mean to export: markdown() and +markdownFromFile(). +""" + + +def markdown(text, **kwargs): + """Convert a markdown string to HTML and return HTML as a unicode string. 
+ + This is a shortcut function for `Markdown` class to cover the most + basic use case. It initializes an instance of Markdown, loads the + necessary extensions and runs the parser on the given text. + + Keyword arguments: + + * text: Markdown formatted text as Unicode or ASCII string. + * Any arguments accepted by the Markdown class. + + Returns: An HTML document as a string. + + """ + md = Markdown(**kwargs) + return md.convert(text) + + +def markdownFromFile(**kwargs): + """Read markdown code from a file and write it to a file or a stream. + + This is a shortcut function which initializes an instance of Markdown, + and calls the convertFile method rather than convert. + + Keyword arguments: + + * input: a file name or readable object. + * output: a file name or writable object. + * encoding: Encoding of input and output. + * Any arguments accepted by the Markdown class. + + """ + md = Markdown(**kwargs) + md.convertFile(kwargs.get('input', None), + kwargs.get('output', None), + kwargs.get('encoding', None)) diff --git a/SSG/markdown/extensions/__init__.py b/SSG/markdown/extensions/__init__.py new file mode 100644 index 0000000..2d8d72a --- /dev/null +++ b/SSG/markdown/extensions/__init__.py @@ -0,0 +1,86 @@ +""" +Python Markdown + +A Python implementation of John Gruber's Markdown. + +Documentation: https://python-markdown.github.io/ +GitHub: https://github.com/Python-Markdown/markdown/ +PyPI: https://pypi.org/project/Markdown/ + +Started by Manfred Stienstra (http://www.dwerg.net/). +Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +Currently maintained by Waylan Limberg (https://github.com/waylan), +Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +License: BSD (see LICENSE.md for details). 
+""" + +from ..util import parseBoolValue + + +class Extension: + """ Base class for extensions to subclass. """ + + # Default config -- to be overridden by a subclass + # Must be of the following format: + # { + # 'key': ['value', 'description'] + # } + # Note that Extension.setConfig will raise a KeyError + # if a default is not set here. + config = {} + + def __init__(self, **kwargs): + """ Initiate Extension and set up configs. """ + self.setConfigs(kwargs) + + def getConfig(self, key, default=''): + """ Return a setting for the given key or an empty string. """ + if key in self.config: + return self.config[key][0] + else: + return default + + def getConfigs(self): + """ Return all configs settings as a dict. """ + return {key: self.getConfig(key) for key in self.config.keys()} + + def getConfigInfo(self): + """ Return all config descriptions as a list of tuples. """ + return [(key, self.config[key][1]) for key in self.config.keys()] + + def setConfig(self, key, value): + """ Set a config setting for `key` with the given `value`. """ + if isinstance(self.config[key][0], bool): + value = parseBoolValue(value) + if self.config[key][0] is None: + value = parseBoolValue(value, preserve_none=True) + self.config[key][0] = value + + def setConfigs(self, items): + """ Set multiple config settings given a dict or list of tuples. """ + if hasattr(items, 'items'): + # it's a dict + items = items.items() + for key, value in items: + self.setConfig(key, value) + + def extendMarkdown(self, md): + """ + Add the various processors and patterns to the Markdown Instance. + + This method must be overridden by every extension. + + Keyword arguments: + + * md: The Markdown instance. + + """ + raise NotImplementedError( + 'Extension "%s.%s" must define an "extendMarkdown"' + 'method.' 
% (self.__class__.__module__, self.__class__.__name__) + ) diff --git a/SSG/markdown/extensions/__pycache__/__init__.cpython-310.pyc b/SSG/markdown/extensions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6e5ca6a0c329148b66a4725f01141f6cdfbe239 GIT binary patch literal 3365 zcmZ`*&2JmW6`x&F61DnBBD<*#q`_2x8?lL2vJu;YDj10+!%<=jM(H-Fz<^%u49S)E zgPxfcOMzIq2+^k{eItNeJ(m~%+*%ibsl>G zv<+fVw-Z4<+U9*a;GU40(LYiV(5K$>uHplJHp2xM@O~g!5@~_JX+ISL$b|zMO?ZOG zx85N1V)`hEfMF-+p^=(Wzi&wDpE(Cnpu~VSx}M*SiwiFhgJj^{zFVhW%;=%?JfA*J zCBKw$+w*zIrNq8~h|Ye}8;GFORrJ>O_T9}}+c$UU(=IQ}U!X~!}o@WcJhLr`j$Q-6%7y~tMBD&7uzkG+rg z68iu#b|qVU@J|JaA^mWJ-uGY{KlG$D{>6(eNmW8+dSbR*>RVF+YAv*g-(bj9Oe69W z8Ch>E3jvNRZ16pd0~Np`w7kq1NpeNg9`%#B9dx8ikAQ}Q#r=GtZf8}=!*(|RyzhyQ zJSSoS{QrNnzuWv9d_*>TL83&n2yWTTgSOdYA{nwxDLc(Z<6*Oi&*ns*W~&uIrdG@C z4YFGC$7SP*Gf^{Vml+gRxGO`T} zJM^zyr6Q&tg^ntSI|gL!NS)w}$MiWLz@>l;2Kg$+?xy=RnmfVh#&g z1@zeh<83bul|d$-YI<|AQ@~iqPdSgtu~)6CJ-jketfZMSs5Y+FO^=_iVfq(oTRK<2 zuKfVLezM+>1M*6nwgS@@I)!0j$3i=!`IC{(w^`=B|uGuz!3+QZH*gTR!!&Yr#o$+>s$mS(E$a*Yf(F40xGdtu^xp}gl# z4xQU#1xKg%K++>V99N4|7>oshgUDV;WyzFAK)lpyZtU~lyT{cpr=dO z7`ZTzN>??NqRt^aX4vZcwHp`=KgLH#W4M%KIQBkrowV(L8zk%2TWwddtfw_SO_a>_ z+tb$>1n*$mAowvEetL<1hjBZZ0I&{QYlme<#kWpx(!j zk1#1{dJ133<4f7EY_X~;Bm0&0(w^R7c1pw_^p5T=ezdVWwZBK0j?1~>4s9S0d1Lo2 zyw}9$Gq}Su+DBJFCt>>ROsDB*E*0o<$k9_It(BhONJwYA6(=#TS2HJ&0cyDy`+V$J zC&=$Y8*LoE#*wo*!z=eSo!dknyk!pD!J5=<)m6J@ty#+$(*a!4`I>X_Xt1s8+ANM= zn2Y*4b@m%R=qG|v6g<E4j*xfCcBXdQKZ zSvNPcg3Ed?s|&}ya+IjUqOa!cfe?wv7F#V-`CF~bX|R>SpXk_^R7^MY1Dy z!HU`H>`h-42DO)LnA83R2EA5_>&_CR9T3|0>&xO3yu_a{Wy`JBi_{DAi?fARizR-m zWr9pBYARhTgtofSHQ6|v@g<=P;gon2Ppe=SE1ys +for documentation. + +Oringinal code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/) and + [Seemant Kulleen](http://www.kulleen.org/) + +All changes Copyright 2008-2014 The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +''' + +from . 
import Extension +from ..blockprocessors import BlockProcessor +from ..inlinepatterns import InlineProcessor +from ..util import AtomicString +import re +import xml.etree.ElementTree as etree + + +class AbbrExtension(Extension): + """ Abbreviation Extension for Python-Markdown. """ + + def extendMarkdown(self, md): + """ Insert AbbrPreprocessor before ReferencePreprocessor. """ + md.parser.blockprocessors.register(AbbrPreprocessor(md.parser), 'abbr', 16) + + +class AbbrPreprocessor(BlockProcessor): + """ Abbreviation Preprocessor - parse text for abbr references. """ + + RE = re.compile(r'^[*]\[(?P[^\]]*)\][ ]?:[ ]*\n?[ ]*(?P.*)$', re.MULTILINE) + + def test(self, parent, block): + return True + + def run(self, parent, blocks): + ''' + Find and remove all Abbreviation references from the text. + Each reference is set as a new AbbrPattern in the markdown instance. + + ''' + block = blocks.pop(0) + m = self.RE.search(block) + if m: + abbr = m.group('abbr').strip() + title = m.group('title').strip() + self.parser.md.inlinePatterns.register( + AbbrInlineProcessor(self._generate_pattern(abbr), title), 'abbr-%s' % abbr, 2 + ) + if block[m.end():].strip(): + # Add any content after match back to blocks as separate block + blocks.insert(0, block[m.end():].lstrip('\n')) + if block[:m.start()].strip(): + # Add any content before match back to blocks as separate block + blocks.insert(0, block[:m.start()].rstrip('\n')) + return True + # No match. Restore block. + blocks.insert(0, block) + return False + + def _generate_pattern(self, text): + ''' + Given a string, returns an regex pattern to match that string. + + 'HTML' -> r'(?P<abbr>[H][T][M][L])' + + Note: we force each char as a literal match (in brackets) as we don't + know what they will be beforehand. + + ''' + chars = list(text) + for i in range(len(chars)): + chars[i] = r'[%s]' % chars[i] + return r'(?P<abbr>\b%s\b)' % (r''.join(chars)) + + +class AbbrInlineProcessor(InlineProcessor): + """ Abbreviation inline pattern. 
""" + + def __init__(self, pattern, title): + super().__init__(pattern) + self.title = title + + def handleMatch(self, m, data): + abbr = etree.Element('abbr') + abbr.text = AtomicString(m.group('abbr')) + abbr.set('title', self.title) + return abbr, m.start(0), m.end(0) + + +def makeExtension(**kwargs): # pragma: no cover + return AbbrExtension(**kwargs) diff --git a/SSG/markdown/extensions/admonition.py b/SSG/markdown/extensions/admonition.py new file mode 100644 index 0000000..cb8d901 --- /dev/null +++ b/SSG/markdown/extensions/admonition.py @@ -0,0 +1,170 @@ +""" +Admonition extension for Python-Markdown +======================================== + +Adds rST-style admonitions. Inspired by [rST][] feature with the same name. + +[rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions # noqa + +See <https://Python-Markdown.github.io/extensions/admonition> +for documentation. + +Original code Copyright [Tiago Serafim](https://www.tiagoserafim.com/). + +All changes Copyright The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +""" + +from . import Extension +from ..blockprocessors import BlockProcessor +import xml.etree.ElementTree as etree +import re + + +class AdmonitionExtension(Extension): + """ Admonition extension for Python-Markdown. """ + + def extendMarkdown(self, md): + """ Add Admonition to Markdown instance. """ + md.registerExtension(self) + + md.parser.blockprocessors.register(AdmonitionProcessor(md.parser), 'admonition', 105) + + +class AdmonitionProcessor(BlockProcessor): + + CLASSNAME = 'admonition' + CLASSNAME_TITLE = 'admonition-title' + RE = re.compile(r'(?:^|\n)!!! ?([\w\-]+(?: +[\w\-]+)*)(?: +"(.*?)")? *(?:\n|$)') + RE_SPACES = re.compile(' +') + + def __init__(self, parser): + """Initialization.""" + + super().__init__(parser) + + self.current_sibling = None + self.content_indention = 0 + + def parse_content(self, parent, block): + """Get sibling admonition. 
+ + Retrieve the appropriate sibling element. This can get tricky when + dealing with lists. + + """ + + old_block = block + the_rest = '' + + # We already acquired the block via test + if self.current_sibling is not None: + sibling = self.current_sibling + block, the_rest = self.detab(block, self.content_indent) + self.current_sibling = None + self.content_indent = 0 + return sibling, block, the_rest + + sibling = self.lastChild(parent) + + if sibling is None or sibling.get('class', '').find(self.CLASSNAME) == -1: + sibling = None + else: + # If the last child is a list and the content is sufficiently indented + # to be under it, then the content's sibling is in the list. + last_child = self.lastChild(sibling) + indent = 0 + while last_child: + if ( + sibling and block.startswith(' ' * self.tab_length * 2) and + last_child and last_child.tag in ('ul', 'ol', 'dl') + ): + + # The expectation is that we'll find an <li> or <dt>. + # We should get its last child as well. + sibling = self.lastChild(last_child) + last_child = self.lastChild(sibling) if sibling else None + + # Context has been lost at this point, so we must adjust the + # text's indentation level so it will be evaluated correctly + # under the list. 
+ block = block[self.tab_length:] + indent += self.tab_length + else: + last_child = None + + if not block.startswith(' ' * self.tab_length): + sibling = None + + if sibling is not None: + indent += self.tab_length + block, the_rest = self.detab(old_block, indent) + self.current_sibling = sibling + self.content_indent = indent + + return sibling, block, the_rest + + def test(self, parent, block): + + if self.RE.search(block): + return True + else: + return self.parse_content(parent, block)[0] is not None + + def run(self, parent, blocks): + block = blocks.pop(0) + m = self.RE.search(block) + + if m: + if m.start() > 0: + self.parser.parseBlocks(parent, [block[:m.start()]]) + block = block[m.end():] # removes the first line + block, theRest = self.detab(block) + else: + sibling, block, theRest = self.parse_content(parent, block) + + if m: + klass, title = self.get_class_and_title(m) + div = etree.SubElement(parent, 'div') + div.set('class', '{} {}'.format(self.CLASSNAME, klass)) + if title: + p = etree.SubElement(div, 'p') + p.text = title + p.set('class', self.CLASSNAME_TITLE) + else: + # Sibling is a list item, but we need to wrap it's content should be wrapped in <p> + if sibling.tag in ('li', 'dd') and sibling.text: + text = sibling.text + sibling.text = '' + p = etree.SubElement(sibling, 'p') + p.text = text + + div = sibling + + self.parser.parseChunk(div, block) + + if theRest: + # This block contained unindented line(s) after the first indented + # line. Insert these lines as the first block of the master blocks + # list for future processing. + blocks.insert(0, theRest) + + def get_class_and_title(self, match): + klass, title = match.group(1).lower(), match.group(2) + klass = self.RE_SPACES.sub(' ', klass) + if title is None: + # no title was provided, use the capitalized classname as title + # e.g.: `!!! 
note` will render + # `<p class="admonition-title">Note</p>` + title = klass.split(' ', 1)[0].capitalize() + elif title == '': + # an explicit blank title should not be rendered + # e.g.: `!!! warning ""` will *not* render `p` with a title + title = None + return klass, title + + +def makeExtension(**kwargs): # pragma: no cover + return AdmonitionExtension(**kwargs) diff --git a/SSG/markdown/extensions/attr_list.py b/SSG/markdown/extensions/attr_list.py new file mode 100644 index 0000000..9a67551 --- /dev/null +++ b/SSG/markdown/extensions/attr_list.py @@ -0,0 +1,166 @@ +""" +Attribute List Extension for Python-Markdown +============================================ + +Adds attribute list syntax. Inspired by +[maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s +feature of the same name. + +See <https://Python-Markdown.github.io/extensions/attr_list> +for documentation. + +Original code Copyright 2011 [Waylan Limberg](http://achinghead.com/). + +All changes Copyright 2011-2014 The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +""" + +from . import Extension +from ..treeprocessors import Treeprocessor +import re + + +def _handle_double_quote(s, t): + k, v = t.split('=', 1) + return k, v.strip('"') + + +def _handle_single_quote(s, t): + k, v = t.split('=', 1) + return k, v.strip("'") + + +def _handle_key_value(s, t): + return t.split('=', 1) + + +def _handle_word(s, t): + if t.startswith('.'): + return '.', t[1:] + if t.startswith('#'): + return 'id', t[1:] + return t, t + + +_scanner = re.Scanner([ + (r'[^ =]+=".*?"', _handle_double_quote), + (r"[^ =]+='.*?'", _handle_single_quote), + (r'[^ =]+=[^ =]+', _handle_key_value), + (r'[^ =]+', _handle_word), + (r' ', None) +]) + + +def get_attrs(str): + """ Parse attribute list and return a list of attribute tuples. 
""" + return _scanner.scan(str)[0] + + +def isheader(elem): + return elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6'] + + +class AttrListTreeprocessor(Treeprocessor): + + BASE_RE = r'\{\:?[ ]*([^\}\n ][^\}\n]*)[ ]*\}' + HEADER_RE = re.compile(r'[ ]+{}[ ]*$'.format(BASE_RE)) + BLOCK_RE = re.compile(r'\n[ ]*{}[ ]*$'.format(BASE_RE)) + INLINE_RE = re.compile(r'^{}'.format(BASE_RE)) + NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff' + r'\u0370-\u037d\u037f-\u1fff\u200c-\u200d' + r'\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff' + r'\uf900-\ufdcf\ufdf0-\ufffd' + r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+') + + def run(self, doc): + for elem in doc.iter(): + if self.md.is_block_level(elem.tag): + # Block level: check for attrs on last line of text + RE = self.BLOCK_RE + if isheader(elem) or elem.tag in ['dt', 'td', 'th']: + # header, def-term, or table cell: check for attrs at end of element + RE = self.HEADER_RE + if len(elem) and elem.tag == 'li': + # special case list items. children may include a ul or ol. + pos = None + # find the ul or ol position + for i, child in enumerate(elem): + if child.tag in ['ul', 'ol']: + pos = i + break + if pos is None and elem[-1].tail: + # use tail of last child. no ul or ol. + m = RE.search(elem[-1].tail) + if m: + self.assign_attrs(elem, m.group(1)) + elem[-1].tail = elem[-1].tail[:m.start()] + elif pos is not None and pos > 0 and elem[pos-1].tail: + # use tail of last child before ul or ol + m = RE.search(elem[pos-1].tail) + if m: + self.assign_attrs(elem, m.group(1)) + elem[pos-1].tail = elem[pos-1].tail[:m.start()] + elif elem.text: + # use text. ul is first child. + m = RE.search(elem.text) + if m: + self.assign_attrs(elem, m.group(1)) + elem.text = elem.text[:m.start()] + elif len(elem) and elem[-1].tail: + # has children. 
Get from tail of last child + m = RE.search(elem[-1].tail) + if m: + self.assign_attrs(elem, m.group(1)) + elem[-1].tail = elem[-1].tail[:m.start()] + if isheader(elem): + # clean up trailing #s + elem[-1].tail = elem[-1].tail.rstrip('#').rstrip() + elif elem.text: + # no children. Get from text. + m = RE.search(elem.text) + if m: + self.assign_attrs(elem, m.group(1)) + elem.text = elem.text[:m.start()] + if isheader(elem): + # clean up trailing #s + elem.text = elem.text.rstrip('#').rstrip() + else: + # inline: check for attrs at start of tail + if elem.tail: + m = self.INLINE_RE.match(elem.tail) + if m: + self.assign_attrs(elem, m.group(1)) + elem.tail = elem.tail[m.end():] + + def assign_attrs(self, elem, attrs): + """ Assign attrs to element. """ + for k, v in get_attrs(attrs): + if k == '.': + # add to class + cls = elem.get('class') + if cls: + elem.set('class', '{} {}'.format(cls, v)) + else: + elem.set('class', v) + else: + # assign attr k with v + elem.set(self.sanitize_name(k), v) + + def sanitize_name(self, name): + """ + Sanitize name as 'an XML Name, minus the ":"'. + See https://www.w3.org/TR/REC-xml-names/#NT-NCName + """ + return self.NAME_RE.sub('_', name) + + +class AttrListExtension(Extension): + def extendMarkdown(self, md): + md.treeprocessors.register(AttrListTreeprocessor(md), 'attr_list', 8) + md.registerExtension(self) + + +def makeExtension(**kwargs): # pragma: no cover + return AttrListExtension(**kwargs) diff --git a/SSG/markdown/extensions/codehilite.py b/SSG/markdown/extensions/codehilite.py new file mode 100644 index 0000000..a1e9dc3 --- /dev/null +++ b/SSG/markdown/extensions/codehilite.py @@ -0,0 +1,330 @@ +""" +CodeHilite Extension for Python-Markdown +======================================== + +Adds code/syntax highlighting to standard Python-Markdown code blocks. + +See <https://Python-Markdown.github.io/extensions/code_hilite> +for documentation. + +Original code Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/). 
+ +All changes Copyright 2008-2014 The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +""" + +from . import Extension +from ..treeprocessors import Treeprocessor +from ..util import parseBoolValue + +try: # pragma: no cover + from pygments import highlight + from pygments.lexers import get_lexer_by_name, guess_lexer + from pygments.formatters import get_formatter_by_name + from pygments.util import ClassNotFound + pygments = True +except ImportError: # pragma: no cover + pygments = False + + +def parse_hl_lines(expr): + """Support our syntax for emphasizing certain lines of code. + + expr should be like '1 2' to emphasize lines 1 and 2 of a code block. + Returns a list of ints, the line numbers to emphasize. + """ + if not expr: + return [] + + try: + return list(map(int, expr.split())) + except ValueError: # pragma: no cover + return [] + + +# ------------------ The Main CodeHilite Class ---------------------- +class CodeHilite: + """ + Determine language of source code, and pass it on to the Pygments highlighter. + + Usage: + code = CodeHilite(src=some_code, lang='python') + html = code.hilite() + + Arguments: + * src: Source string or any object with a .readline attribute. + + * lang: String name of Pygments lexer to use for highlighting. Default: `None`. + + * guess_lang: Auto-detect which lexer to use. Ignored if `lang` is set to a valid + value. Default: `True`. + + * use_pygments: Pass code to pygments for code highlighting. If `False`, the code is + instead wrapped for highlighting by a JavaScript library. Default: `True`. + + * pygments_formatter: The name of a Pygments formatter or a formatter class used for + highlighting the code blocks. Default: `html`. + + * linenums: An alias to Pygments `linenos` formatter option. Default: `None`. + + * css_class: An alias to Pygments `cssclass` formatter option. Default: 'codehilite'. + + * lang_prefix: Prefix prepended to the language. Default: "language-". 
+ + Other Options: + Any other options are accepted and passed on to the lexer and formatter. Therefore, + valid options include any options which are accepted by the `html` formatter or + whichever lexer the code's language uses. Note that most lexers do not have any + options. However, a few have very useful options, such as PHP's `startinline` option. + Any invalid options are ignored without error. + + Formatter options: https://pygments.org/docs/formatters/#HtmlFormatter + Lexer Options: https://pygments.org/docs/lexers/ + + Additionally, when Pygments is enabled, the code's language is passed to the + formatter as an extra option `lang_str`, whose value being `{lang_prefix}{lang}`. + This option has no effect to the Pygments's builtin formatters. + + Advanced Usage: + code = CodeHilite( + src = some_code, + lang = 'php', + startinline = True, # Lexer option. Snippet does not start with `<?php`. + linenostart = 42, # Formatter option. Snippet starts on line 42. + hl_lines = [45, 49, 50], # Formatter option. Highlight lines 45, 49, and 50. + linenos = 'inline' # Formatter option. Avoid alignment problems. + ) + html = code.hilite() + + """ + + def __init__(self, src, **options): + self.src = src + self.lang = options.pop('lang', None) + self.guess_lang = options.pop('guess_lang', True) + self.use_pygments = options.pop('use_pygments', True) + self.lang_prefix = options.pop('lang_prefix', 'language-') + self.pygments_formatter = options.pop('pygments_formatter', 'html') + + if 'linenos' not in options: + options['linenos'] = options.pop('linenums', None) + if 'cssclass' not in options: + options['cssclass'] = options.pop('css_class', 'codehilite') + if 'wrapcode' not in options: + # Override pygments default + options['wrapcode'] = True + # Disallow use of `full` option + options['full'] = False + + self.options = options + + def hilite(self, shebang=True): + """ + Pass code to the [Pygments](http://pygments.pocoo.org/) highliter with + optional line numbers. 
The output should then be styled with css to + your liking. No styles are applied by default - only styling hooks + (i.e.: <span class="k">). + + returns : A string of html. + + """ + + self.src = self.src.strip('\n') + + if self.lang is None and shebang: + self._parseHeader() + + if pygments and self.use_pygments: + try: + lexer = get_lexer_by_name(self.lang, **self.options) + except ValueError: + try: + if self.guess_lang: + lexer = guess_lexer(self.src, **self.options) + else: + lexer = get_lexer_by_name('text', **self.options) + except ValueError: # pragma: no cover + lexer = get_lexer_by_name('text', **self.options) + if not self.lang: + # Use the guessed lexer's language instead + self.lang = lexer.aliases[0] + lang_str = f'{self.lang_prefix}{self.lang}' + if isinstance(self.pygments_formatter, str): + try: + formatter = get_formatter_by_name(self.pygments_formatter, **self.options) + except ClassNotFound: + formatter = get_formatter_by_name('html', **self.options) + else: + formatter = self.pygments_formatter(lang_str=lang_str, **self.options) + return highlight(self.src, lexer, formatter) + else: + # just escape and build markup usable by JS highlighting libs + txt = self.src.replace('&', '&') + txt = txt.replace('<', '<') + txt = txt.replace('>', '>') + txt = txt.replace('"', '"') + classes = [] + if self.lang: + classes.append('{}{}'.format(self.lang_prefix, self.lang)) + if self.options['linenos']: + classes.append('linenums') + class_str = '' + if classes: + class_str = ' class="{}"'.format(' '.join(classes)) + return '<pre class="{}"><code{}>{}\n</code></pre>\n'.format( + self.options['cssclass'], + class_str, + txt + ) + + def _parseHeader(self): + """ + Determines language of a code block from shebang line and whether the + said line should be removed or left in place. If the shebang line + contains a path (even a single /) then it is assumed to be a real + shebang line and left alone. 
However, if no path is given + (e.i.: #!python or :::python) then it is assumed to be a mock shebang + for language identification of a code fragment and removed from the + code block prior to processing for code highlighting. When a mock + shebang (e.i: #!python) is found, line numbering is turned on. When + colons are found in place of a shebang (e.i.: :::python), line + numbering is left in the current state - off by default. + + Also parses optional list of highlight lines, like: + + :::python hl_lines="1 3" + """ + + import re + + # split text into lines + lines = self.src.split("\n") + # pull first line to examine + fl = lines.pop(0) + + c = re.compile(r''' + (?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons + (?P<path>(?:/\w+)*[/ ])? # Zero or 1 path + (?P<lang>[\w#.+-]*) # The language + \s* # Arbitrary whitespace + # Optional highlight lines, single- or double-quote-delimited + (hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))? + ''', re.VERBOSE) + # search first line for shebang + m = c.search(fl) + if m: + # we have a match + try: + self.lang = m.group('lang').lower() + except IndexError: # pragma: no cover + self.lang = None + if m.group('path'): + # path exists - restore first line + lines.insert(0, fl) + if self.options['linenos'] is None and m.group('shebang'): + # Overridable and Shebang exists - use line numbers + self.options['linenos'] = True + + self.options['hl_lines'] = parse_hl_lines(m.group('hl_lines')) + else: + # No match + lines.insert(0, fl) + + self.src = "\n".join(lines).strip("\n") + + +# ------------------ The Markdown Extension ------------------------------- + + +class HiliteTreeprocessor(Treeprocessor): + """ Highlight source code in code blocks. """ + + def code_unescape(self, text): + """Unescape code.""" + text = text.replace("<", "<") + text = text.replace(">", ">") + # Escaped '&' should be replaced at the end to avoid + # conflicting with < and >. 
+ text = text.replace("&", "&") + return text + + def run(self, root): + """ Find code blocks and store in htmlStash. """ + blocks = root.iter('pre') + for block in blocks: + if len(block) == 1 and block[0].tag == 'code': + local_config = self.config.copy() + code = CodeHilite( + self.code_unescape(block[0].text), + tab_length=self.md.tab_length, + style=local_config.pop('pygments_style', 'default'), + **local_config + ) + placeholder = self.md.htmlStash.store(code.hilite()) + # Clear codeblock in etree instance + block.clear() + # Change to p element which will later + # be removed when inserting raw html + block.tag = 'p' + block.text = placeholder + + +class CodeHiliteExtension(Extension): + """ Add source code highlighting to markdown codeblocks. """ + + def __init__(self, **kwargs): + # define default configs + self.config = { + 'linenums': [None, + "Use lines numbers. True|table|inline=yes, False=no, None=auto"], + 'guess_lang': [True, + "Automatic language detection - Default: True"], + 'css_class': ["codehilite", + "Set class name for wrapper <div> - " + "Default: codehilite"], + 'pygments_style': ['default', + 'Pygments HTML Formatter Style ' + '(Colorscheme) - Default: default'], + 'noclasses': [False, + 'Use inline styles instead of CSS classes - ' + 'Default false'], + 'use_pygments': [True, + 'Use Pygments to Highlight code blocks. ' + 'Disable if using a JavaScript library. ' + 'Default: True'], + 'lang_prefix': [ + 'language-', + 'Prefix prepended to the language when use_pygments is false. Default: "language-"' + ], + 'pygments_formatter': ['html', + 'Use a specific formatter for Pygments highlighting.' + 'Default: "html"', + ], + } + + for key, value in kwargs.items(): + if key in self.config: + self.setConfig(key, value) + else: + # manually set unknown keywords. 
+ if isinstance(value, str): + try: + # Attempt to parse str as a bool value + value = parseBoolValue(value, preserve_none=True) + except ValueError: + pass # Assume it's not a bool value. Use as-is. + self.config[key] = [value, ''] + + def extendMarkdown(self, md): + """ Add HilitePostprocessor to Markdown instance. """ + hiliter = HiliteTreeprocessor(md) + hiliter.config = self.getConfigs() + md.treeprocessors.register(hiliter, 'hilite', 30) + + md.registerExtension(self) + + +def makeExtension(**kwargs): # pragma: no cover + return CodeHiliteExtension(**kwargs) diff --git a/SSG/markdown/extensions/def_list.py b/SSG/markdown/extensions/def_list.py new file mode 100644 index 0000000..17549f0 --- /dev/null +++ b/SSG/markdown/extensions/def_list.py @@ -0,0 +1,111 @@ +""" +Definition List Extension for Python-Markdown +============================================= + +Adds parsing of Definition Lists to Python-Markdown. + +See <https://Python-Markdown.github.io/extensions/definition_lists> +for documentation. + +Original code Copyright 2008 [Waylan Limberg](http://achinghead.com) + +All changes Copyright 2008-2014 The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +""" + +from . import Extension +from ..blockprocessors import BlockProcessor, ListIndentProcessor +import xml.etree.ElementTree as etree +import re + + +class DefListProcessor(BlockProcessor): + """ Process Definition Lists. 
""" + + RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)') + NO_INDENT_RE = re.compile(r'^[ ]{0,3}[^ :]') + + def test(self, parent, block): + return bool(self.RE.search(block)) + + def run(self, parent, blocks): + + raw_block = blocks.pop(0) + m = self.RE.search(raw_block) + terms = [term.strip() for term in + raw_block[:m.start()].split('\n') if term.strip()] + block = raw_block[m.end():] + no_indent = self.NO_INDENT_RE.match(block) + if no_indent: + d, theRest = (block, None) + else: + d, theRest = self.detab(block) + if d: + d = '{}\n{}'.format(m.group(2), d) + else: + d = m.group(2) + sibling = self.lastChild(parent) + if not terms and sibling is None: + # This is not a definition item. Most likely a paragraph that + # starts with a colon at the beginning of a document or list. + blocks.insert(0, raw_block) + return False + if not terms and sibling.tag == 'p': + # The previous paragraph contains the terms + state = 'looselist' + terms = sibling.text.split('\n') + parent.remove(sibling) + # Acquire new sibling + sibling = self.lastChild(parent) + else: + state = 'list' + + if sibling is not None and sibling.tag == 'dl': + # This is another item on an existing list + dl = sibling + if not terms and len(dl) and dl[-1].tag == 'dd' and len(dl[-1]): + state = 'looselist' + else: + # This is a new list + dl = etree.SubElement(parent, 'dl') + # Add terms + for term in terms: + dt = etree.SubElement(dl, 'dt') + dt.text = term + # Add definition + self.parser.state.set(state) + dd = etree.SubElement(dl, 'dd') + self.parser.parseBlocks(dd, [d]) + self.parser.state.reset() + + if theRest: + blocks.insert(0, theRest) + + +class DefListIndentProcessor(ListIndentProcessor): + """ Process indented children of definition list items. 
""" + + # Definition lists need to be aware of all list types + ITEM_TYPES = ['dd', 'li'] + LIST_TYPES = ['dl', 'ol', 'ul'] + + def create_item(self, parent, block): + """ Create a new dd or li (depending on parent) and parse the block with it as the parent. """ + + dd = etree.SubElement(parent, 'dd') + self.parser.parseBlocks(dd, [block]) + + +class DefListExtension(Extension): + """ Add definition lists to Markdown. """ + + def extendMarkdown(self, md): + """ Add an instance of DefListProcessor to BlockParser. """ + md.parser.blockprocessors.register(DefListIndentProcessor(md.parser), 'defindent', 85) + md.parser.blockprocessors.register(DefListProcessor(md.parser), 'deflist', 25) + + +def makeExtension(**kwargs): # pragma: no cover + return DefListExtension(**kwargs) diff --git a/SSG/markdown/extensions/extra.py b/SSG/markdown/extensions/extra.py new file mode 100644 index 0000000..909ba07 --- /dev/null +++ b/SSG/markdown/extensions/extra.py @@ -0,0 +1,58 @@ +""" +Python-Markdown Extra Extension +=============================== + +A compilation of various Python-Markdown extensions that imitates +[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/). + +Note that each of the individual extensions still need to be available +on your PYTHONPATH. This extension simply wraps them all up as a +convenience so that only one extension needs to be listed when +initiating Markdown. See the documentation for each individual +extension for specifics about that extension. + +There may be additional extensions that are distributed with +Python-Markdown that are not included here in Extra. Those extensions +are not part of PHP Markdown Extra, and therefore, not part of +Python-Markdown Extra. If you really would like Extra to include +additional extensions, we suggest creating your own clone of Extra +under a different name. 
You could also edit the `extensions` global +variable defined below, but be aware that such changes may be lost +when you upgrade to any future version of Python-Markdown. + +See <https://Python-Markdown.github.io/extensions/extra> +for documentation. + +Copyright The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +""" + +from . import Extension + +extensions = [ + 'fenced_code', + 'footnotes', + 'attr_list', + 'def_list', + 'tables', + 'abbr', + 'md_in_html' +] + + +class ExtraExtension(Extension): + """ Add various extensions to Markdown class.""" + + def __init__(self, **kwargs): + """ config is a dumb holder which gets passed to actual ext later. """ + self.config = kwargs + + def extendMarkdown(self, md): + """ Register extension instances. """ + md.registerExtensions(extensions, self.config) + + +def makeExtension(**kwargs): # pragma: no cover + return ExtraExtension(**kwargs) diff --git a/SSG/markdown/extensions/fenced_code.py b/SSG/markdown/extensions/fenced_code.py new file mode 100644 index 0000000..409166a --- /dev/null +++ b/SSG/markdown/extensions/fenced_code.py @@ -0,0 +1,174 @@ +""" +Fenced Code Extension for Python Markdown +========================================= + +This extension adds Fenced Code Blocks to Python-Markdown. + +See <https://Python-Markdown.github.io/extensions/fenced_code_blocks> +for documentation. + +Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/). + + +All changes Copyright 2008-2014 The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) +""" + + +from textwrap import dedent +from . 
from textwrap import dedent
from . import Extension
from ..preprocessors import Preprocessor
from .codehilite import CodeHilite, CodeHiliteExtension, parse_hl_lines
from .attr_list import get_attrs, AttrListExtension
from ..util import parseBoolValue
from ..serializers import _escape_attrib_html
import re


class FencedCodeExtension(Extension):
    """ Add fenced code block support to Markdown. """

    def __init__(self, **kwargs):
        self.config = {
            'lang_prefix': ['language-', 'Prefix prepended to the language. Default: "language-"']
        }
        super().__init__(**kwargs)

    def extendMarkdown(self, md):
        """ Add FencedBlockPreprocessor to the Markdown instance. """
        md.registerExtension(self)

        md.preprocessors.register(FencedBlockPreprocessor(md, self.getConfigs()), 'fenced_code_block', 25)


class FencedBlockPreprocessor(Preprocessor):
    """ Match fenced code blocks and replace them with HtmlStash placeholders. """

    FENCED_BLOCK_RE = re.compile(
        dedent(r'''
            (?P<fence>^(?:~{3,}|`{3,}))[ ]*                          # opening fence
            ((\{(?P<attrs>[^\}\n]*)\})|                              # (optional {attrs} or
            (\.?(?P<lang>[\w#.+-]*)[ ]*)?                            # optional (.)lang
            (hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot)[ ]*)?) # optional hl_lines)
            \n                                                       # newline (end of opening fence)
            (?P<code>.*?)(?<=\n)                                     # the code block
            (?P=fence)[ ]*$                                          # closing fence
        '''),
        re.MULTILINE | re.DOTALL | re.VERBOSE
    )

    def __init__(self, md, config):
        super().__init__(md)
        self.config = config
        self.checked_for_deps = False
        self.codehilite_conf = {}
        self.use_attr_list = False
        # List of options to convert to bool values
        self.bool_options = [
            'linenums',
            'guess_lang',
            'noclasses',
            'use_pygments'
        ]

    def run(self, lines):
        """ Match and store Fenced Code Blocks in the HtmlStash. """

        # Check for dependent extensions (done once per document, lazily,
        # because extensions may be registered after this preprocessor).
        if not self.checked_for_deps:
            for ext in self.md.registeredExtensions:
                if isinstance(ext, CodeHiliteExtension):
                    self.codehilite_conf = ext.getConfigs()
                if isinstance(ext, AttrListExtension):
                    self.use_attr_list = True

            self.checked_for_deps = True

        text = "\n".join(lines)
        while True:
            m = self.FENCED_BLOCK_RE.search(text)
            if m:
                lang, id, classes, config = None, '', [], {}
                if m.group('attrs'):
                    id, classes, config = self.handle_attrs(get_attrs(m.group('attrs')))
                    if len(classes):
                        # First class doubles as the language.
                        lang = classes.pop(0)
                else:
                    if m.group('lang'):
                        lang = m.group('lang')
                    if m.group('hl_lines'):
                        # Support hl_lines outside of attrs for backward-compatibility
                        config['hl_lines'] = parse_hl_lines(m.group('hl_lines'))

                # If config is not empty, then the codehighlite extension
                # is enabled, so we call it to highlight the code
                if self.codehilite_conf and self.codehilite_conf['use_pygments'] and config.get('use_pygments', True):
                    local_config = self.codehilite_conf.copy()
                    local_config.update(config)
                    # Combine classes with cssclass. Ensure cssclass is at end
                    # as pygments appends a suffix under certain circumstances.
                    # Ignore ID as Pygments does not offer an option to set it.
                    if classes:
                        local_config['css_class'] = '{} {}'.format(
                            ' '.join(classes),
                            local_config['css_class']
                        )
                    highliter = CodeHilite(
                        m.group('code'),
                        lang=lang,
                        style=local_config.pop('pygments_style', 'default'),
                        **local_config
                    )

                    code = highliter.hilite(shebang=False)
                else:
                    id_attr = lang_attr = class_attr = kv_pairs = ''
                    if lang:
                        prefix = self.config.get('lang_prefix', 'language-')
                        lang_attr = f' class="{prefix}{_escape_attrib_html(lang)}"'
                    if classes:
                        class_attr = f' class="{_escape_attrib_html(" ".join(classes))}"'
                    if id:
                        id_attr = f' id="{_escape_attrib_html(id)}"'
                    if self.use_attr_list and config and not config.get('use_pygments', False):
                        # Only assign key/value pairs to code element if attr_list ext is enabled, key/value pairs
                        # were defined on the code block, and the `use_pygments` key was not set to True. The
                        # `use_pygments` key could be either set to False or not defined. It is omitted from output.
                        kv_pairs = ''.join(
                            f' {k}="{_escape_attrib_html(v)}"' for k, v in config.items() if k != 'use_pygments'
                        )
                    code = self._escape(m.group('code'))
                    code = f'<pre{id_attr}{class_attr}><code{lang_attr}{kv_pairs}>{code}</code></pre>'

                placeholder = self.md.htmlStash.store(code)
                text = f'{text[:m.start()]}\n{placeholder}\n{text[m.end():]}'
            else:
                break
        return text.split("\n")

    def handle_attrs(self, attrs):
        """ Return tuple: (id, [list, of, classes], {configs}) """
        id = ''
        classes = []
        configs = {}
        for k, v in attrs:
            if k == 'id':
                id = v
            elif k == '.':
                classes.append(v)
            elif k == 'hl_lines':
                configs[k] = parse_hl_lines(v)
            elif k in self.bool_options:
                configs[k] = parseBoolValue(v, fail_on_errors=False, preserve_none=True)
            else:
                configs[k] = v
        return id, classes, configs

    def _escape(self, txt):
        """ basic html escaping

        BUGFIX: the vendored copy had self-replacements (e.g.
        ``txt.replace('&', '&')``) which are no-ops — the HTML entities were
        stripped during vendoring, leaving fenced code completely unescaped.
        Restore the proper entity substitutions. '&' must be replaced first
        so already-produced entities are not double-escaped.
        """
        txt = txt.replace('&', '&amp;')
        txt = txt.replace('<', '&lt;')
        txt = txt.replace('>', '&gt;')
        txt = txt.replace('"', '&quot;')
        return txt
makeExtension(**kwargs): # pragma: no cover + return FencedCodeExtension(**kwargs) diff --git a/SSG/markdown/extensions/footnotes.py b/SSG/markdown/extensions/footnotes.py new file mode 100644 index 0000000..8a2e3c5 --- /dev/null +++ b/SSG/markdown/extensions/footnotes.py @@ -0,0 +1,411 @@ +""" +Footnotes Extension for Python-Markdown +======================================= + +Adds footnote handling to Python-Markdown. + +See <https://Python-Markdown.github.io/extensions/footnotes> +for documentation. + +Copyright The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +""" + +from . import Extension +from ..blockprocessors import BlockProcessor +from ..inlinepatterns import InlineProcessor +from ..treeprocessors import Treeprocessor +from ..postprocessors import Postprocessor +from .. import util +from collections import OrderedDict +import re +import copy +import xml.etree.ElementTree as etree + +FN_BACKLINK_TEXT = util.STX + "zz1337820767766393qq" + util.ETX +NBSP_PLACEHOLDER = util.STX + "qq3936677670287331zz" + util.ETX +RE_REF_ID = re.compile(r'(fnref)(\d+)') + + +class FootnoteExtension(Extension): + """ Footnote Extension. """ + + def __init__(self, **kwargs): + """ Setup configs. """ + + self.config = { + 'PLACE_MARKER': + ["///Footnotes Go Here///", + "The text string that marks where the footnotes go"], + 'UNIQUE_IDS': + [False, + "Avoid name collisions across " + "multiple calls to reset()."], + "BACKLINK_TEXT": + ["↩", + "The text string that links from the footnote " + "to the reader's place."], + "SUPERSCRIPT_TEXT": + ["{}", + "The text string that links from the reader's place " + "to the footnote."], + "BACKLINK_TITLE": + ["Jump back to footnote %d in the text", + "The text string used for the title HTML attribute " + "of the backlink. 
    def extendMarkdown(self, md):
        """ Add the footnote processors to *md*'s pipelines. """
        md.registerExtension(self)
        self.parser = md.parser
        self.md = md
        # Insert a blockprocessor before ReferencePreprocessor
        md.parser.blockprocessors.register(FootnoteBlockProcessor(self), 'footnote', 17)

        # Insert an inline pattern before ImageReferencePattern
        FOOTNOTE_RE = r'\[\^([^\]]*)\]'  # blah blah [^1] blah
        md.inlinePatterns.register(FootnoteInlineProcessor(FOOTNOTE_RE, self), 'footnote', 175)
        # Insert a tree-processor that would actually add the footnote div
        # This must be before all other treeprocessors (i.e., inline and
        # codehilite) so they can run on the contents of the div.
        md.treeprocessors.register(FootnoteTreeprocessor(self), 'footnote', 50)

        # Insert a tree-processor that will run after inline is done.
        # In this tree-processor we want to check our duplicate footnote tracker
        # And add additional backrefs to the footnote pointing back to the
        # duplicated references.
        md.treeprocessors.register(FootnotePostTreeprocessor(self), 'footnote-duplicate', 15)

        # Insert a postprocessor after amp_substitute processor
        md.postprocessors.register(FootnotePostprocessor(self), 'footnote', 25)

    def reset(self):
        """ Clear footnotes on reset, and prepare for distinct document. """
        self.footnotes = OrderedDict()
        # Bumping the prefix keeps ids unique across multiple documents
        # when UNIQUE_IDS is enabled.
        self.unique_prefix += 1
        self.found_refs = {}
        self.used_refs = set()

    def unique_ref(self, reference, found=False):
        """ Get a unique reference if there are duplicates.

        Repeatedly increments the numeric part of ``fnrefN`` (or appends
        ``2``) until the id is unused, and counts how often the original
        reference was seen in ``found_refs``.
        """
        if not found:
            return reference

        original_ref = reference
        while reference in self.used_refs:
            ref, rest = reference.split(self.get_separator(), 1)
            m = RE_REF_ID.match(ref)
            if m:
                reference = '%s%d%s%s' % (m.group(1), int(m.group(2))+1, self.get_separator(), rest)
            else:
                reference = '%s%d%s%s' % (ref, 2, self.get_separator(), rest)

        self.used_refs.add(reference)
        if original_ref in self.found_refs:
            self.found_refs[original_ref] += 1
        else:
            self.found_refs[original_ref] = 1
        return reference

    def findFootnotesPlaceholder(self, root):
        """ Return ElementTree Element that contains Footnote placeholder.

        Depth-first search; the returned triple is
        (element holding the marker, its parent, marker-is-in-text) where the
        boolean distinguishes `.text` (True) from `.tail` (False).
        """
        def finder(element):
            for child in element:
                if child.text:
                    if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
                        return child, element, True
                if child.tail:
                    if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
                        return child, element, False
                child_res = finder(child)
                if child_res is not None:
                    return child_res
            return None

        res = finder(root)
        return res

    def setFootnote(self, id, text):
        """ Store a footnote for later retrieval. """
        self.footnotes[id] = text

    def get_separator(self):
        """ Get the footnote separator. """
        return self.getConfig("SEPARATOR")

    def makeFootnoteId(self, id):
        """ Return footnote link id (optionally prefixed for uniqueness). """
        if self.getConfig("UNIQUE_IDS"):
            return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
        else:
            return 'fn{}{}'.format(self.get_separator(), id)

    def makeFootnoteRefId(self, id, found=False):
        """ Return footnote back-link id, de-duplicated via unique_ref(). """
        if self.getConfig("UNIQUE_IDS"):
            return self.unique_ref('fnref%s%d-%s' % (self.get_separator(), self.unique_prefix, id), found)
        else:
            return self.unique_ref('fnref{}{}'.format(self.get_separator(), id), found)
""" + + if not list(self.footnotes.keys()): + return None + + div = etree.Element("div") + div.set('class', 'footnote') + etree.SubElement(div, "hr") + ol = etree.SubElement(div, "ol") + surrogate_parent = etree.Element("div") + + # Backward compatibility with old '%d' placeholder + backlink_title = self.getConfig("BACKLINK_TITLE").replace("%d", "{}") + + for index, id in enumerate(self.footnotes.keys(), start=1): + li = etree.SubElement(ol, "li") + li.set("id", self.makeFootnoteId(id)) + # Parse footnote with surrogate parent as li cannot be used. + # List block handlers have special logic to deal with li. + # When we are done parsing, we will copy everything over to li. + self.parser.parseChunk(surrogate_parent, self.footnotes[id]) + for el in list(surrogate_parent): + li.append(el) + surrogate_parent.remove(el) + backlink = etree.Element("a") + backlink.set("href", "#" + self.makeFootnoteRefId(id)) + backlink.set("class", "footnote-backref") + backlink.set( + "title", + backlink_title.format(index) + ) + backlink.text = FN_BACKLINK_TEXT + + if len(li): + node = li[-1] + if node.tag == "p": + node.text = node.text + NBSP_PLACEHOLDER + node.append(backlink) + else: + p = etree.SubElement(li, "p") + p.append(backlink) + return div + + +class FootnoteBlockProcessor(BlockProcessor): + """ Find all footnote references and store for later use. """ + + RE = re.compile(r'^[ ]{0,3}\[\^([^\]]*)\]:[ ]*(.*)$', re.MULTILINE) + + def __init__(self, footnotes): + super().__init__(footnotes.parser) + self.footnotes = footnotes + + def test(self, parent, block): + return True + + def run(self, parent, blocks): + """ Find, set, and remove footnote definitions. """ + block = blocks.pop(0) + m = self.RE.search(block) + if m: + id = m.group(1) + fn_blocks = [m.group(2)] + + # Handle rest of block + therest = block[m.end():].lstrip('\n') + m2 = self.RE.search(therest) + if m2: + # Another footnote exists in the rest of this block. 
    def detectTabbed(self, blocks):
        """ Find indented text and remove indent before further processing.

        Consumes blocks from the front of *blocks* while they are indented
        (4 spaces); stops at the first unindented block or at a new footnote
        definition found inside an indented block.

        Returns: a list of blocks with indentation removed.
        """
        fn_blocks = []
        while blocks:
            if blocks[0].startswith(' '*4):
                block = blocks.pop(0)
                # Check for new footnotes within this block and split at new footnote.
                m = self.RE.search(block)
                if m:
                    # Another footnote exists in this block.
                    # Any content before match is continuation of this footnote, which may be lazily indented.
                    before = block[:m.start()].rstrip('\n')
                    fn_blocks.append(self.detab(before))
                    # Add back to blocks everything from beginning of match forward for next iteration.
                    blocks.insert(0, block[m.start():])
                    # End of this footnote.
                    break
                else:
                    # Entire block is part of this footnote.
                    fn_blocks.append(self.detab(block))
            else:
                # End of this footnote.
                break
        return fn_blocks

    def detab(self, block):
        """ Remove one level of indent from a block.

        Preserve lazily indented blocks by only removing indent from indented lines.
        """
        lines = block.split('\n')
        for i, line in enumerate(lines):
            if line.startswith(' '*4):
                lines[i] = line[4:]
        return '\n'.join(lines)


class FootnoteInlineProcessor(InlineProcessor):
    """ InlinePattern for footnote markers in a document's body text. """

    def __init__(self, pattern, footnotes):
        super().__init__(pattern)
        self.footnotes = footnotes

    def handleMatch(self, m, data):
        """ Replace a known [^id] marker with a <sup><a> reference.

        Unknown ids are left untouched (returns the no-match triple).
        """
        id = m.group(1)
        if id in self.footnotes.footnotes.keys():
            sup = etree.Element("sup")
            a = etree.SubElement(sup, "a")
            sup.set('id', self.footnotes.makeFootnoteRefId(id, found=True))
            a.set('href', '#' + self.footnotes.makeFootnoteId(id))
            a.set('class', 'footnote-ref')
            # The visible marker is the footnote's 1-based position.
            a.text = self.footnotes.getConfig("SUPERSCRIPT_TEXT").format(
                list(self.footnotes.footnotes.keys()).index(id) + 1
            )
            return sup, m.start(0), m.end(0)
        else:
            return None, None, None


class FootnotePostTreeprocessor(Treeprocessor):
    """ Amend footnote div with duplicates. """

    def __init__(self, footnotes):
        # NOTE(review): does not call super().__init__ — mirrors upstream,
        # which also skips the Treeprocessor initializer here.
        self.footnotes = footnotes

    def add_duplicates(self, li, duplicates):
        """ Adjust current li and add the duplicates: fnref2, fnref3, etc. """
        for link in li.iter('a'):
            # Find the link that needs to be duplicated.
            if link.attrib.get('class', '') == 'footnote-backref':
                ref, rest = link.attrib['href'].split(self.footnotes.get_separator(), 1)
                # Duplicate link the number of times we need to
                # and point them to the appropriate references.
                links = []
                for index in range(2, duplicates + 1):
                    sib_link = copy.deepcopy(link)
                    sib_link.attrib['href'] = '%s%d%s%s' % (ref, index, self.footnotes.get_separator(), rest)
                    links.append(sib_link)
                    self.offset += 1
                # Add all the new duplicate links.
                el = list(li)[-1]
                for link in links:
                    el.append(link)
                break
""" + fn, rest = li.attrib.get('id', '').split(self.footnotes.get_separator(), 1) + link_id = '{}ref{}{}'.format(fn, self.footnotes.get_separator(), rest) + return self.footnotes.found_refs.get(link_id, 0) + + def handle_duplicates(self, parent): + """ Find duplicate footnotes and format and add the duplicates. """ + for li in list(parent): + # Check number of duplicates footnotes and insert + # additional links if needed. + count = self.get_num_duplicates(li) + if count > 1: + self.add_duplicates(li, count) + + def run(self, root): + """ Crawl the footnote div and add missing duplicate footnotes. """ + self.offset = 0 + for div in root.iter('div'): + if div.attrib.get('class', '') == 'footnote': + # Footnotes should be under the first ordered list under + # the footnote div. So once we find it, quit. + for ol in div.iter('ol'): + self.handle_duplicates(ol) + break + + +class FootnoteTreeprocessor(Treeprocessor): + """ Build and append footnote div to end of document. """ + + def __init__(self, footnotes): + self.footnotes = footnotes + + def run(self, root): + footnotesDiv = self.footnotes.makeFootnotesDiv(root) + if footnotesDiv is not None: + result = self.footnotes.findFootnotesPlaceholder(root) + if result: + child, parent, isText = result + ind = list(parent).index(child) + if isText: + parent.remove(child) + parent.insert(ind, footnotesDiv) + else: + parent.insert(ind + 1, footnotesDiv) + child.tail = None + else: + root.append(footnotesDiv) + + +class FootnotePostprocessor(Postprocessor): + """ Replace placeholders with html entities. 
""" + def __init__(self, footnotes): + self.footnotes = footnotes + + def run(self, text): + text = text.replace( + FN_BACKLINK_TEXT, self.footnotes.getConfig("BACKLINK_TEXT") + ) + return text.replace(NBSP_PLACEHOLDER, " ") + + +def makeExtension(**kwargs): # pragma: no cover + """ Return an instance of the FootnoteExtension """ + return FootnoteExtension(**kwargs) diff --git a/SSG/markdown/extensions/legacy_attrs.py b/SSG/markdown/extensions/legacy_attrs.py new file mode 100644 index 0000000..445aba1 --- /dev/null +++ b/SSG/markdown/extensions/legacy_attrs.py @@ -0,0 +1,67 @@ +""" +Python Markdown + +A Python implementation of John Gruber's Markdown. + +Documentation: https://python-markdown.github.io/ +GitHub: https://github.com/Python-Markdown/markdown/ +PyPI: https://pypi.org/project/Markdown/ + +Started by Manfred Stienstra (http://www.dwerg.net/). +Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +Currently maintained by Waylan Limberg (https://github.com/waylan), +Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +License: BSD (see LICENSE.md for details). + +Legacy Attributes Extension +=========================== + +An extension to Python Markdown which implements legacy attributes. + +Prior to Python-Markdown version 3.0, the Markdown class had an `enable_attributes` +keyword which was on by default and provided for attributes to be defined for elements +using the format `{@key=value}`. This extension is provided as a replacement for +backward compatibility. New documents should be authored using attr_lists. However, +numerious documents exist which have been using the old attribute format for many +years. This extension can be used to continue to render those documents correctly. 
+""" + +import re +from markdown.treeprocessors import Treeprocessor, isString +from markdown.extensions import Extension + + +ATTR_RE = re.compile(r'\{@([^\}]*)=([^\}]*)}') # {@id=123} + + +class LegacyAttrs(Treeprocessor): + def run(self, doc): + """Find and set values of attributes ({@key=value}). """ + for el in doc.iter(): + alt = el.get('alt', None) + if alt is not None: + el.set('alt', self.handleAttributes(el, alt)) + if el.text and isString(el.text): + el.text = self.handleAttributes(el, el.text) + if el.tail and isString(el.tail): + el.tail = self.handleAttributes(el, el.tail) + + def handleAttributes(self, el, txt): + """ Set attributes and return text without definitions. """ + def attributeCallback(match): + el.set(match.group(1), match.group(2).replace('\n', ' ')) + return ATTR_RE.sub(attributeCallback, txt) + + +class LegacyAttrExtension(Extension): + def extendMarkdown(self, md): + md.treeprocessors.register(LegacyAttrs(md), 'legacyattrs', 15) + + +def makeExtension(**kwargs): # pragma: no cover + return LegacyAttrExtension(**kwargs) diff --git a/SSG/markdown/extensions/legacy_em.py b/SSG/markdown/extensions/legacy_em.py new file mode 100644 index 0000000..360988b --- /dev/null +++ b/SSG/markdown/extensions/legacy_em.py @@ -0,0 +1,49 @@ +''' +Legacy Em Extension for Python-Markdown +======================================= + +This extension provides legacy behavior for _connected_words_. + +Copyright 2015-2018 The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +''' + +from . 
from . import Extension
from ..inlinepatterns import UnderscoreProcessor, EmStrongItem, EM_STRONG2_RE, STRONG_EM2_RE
import re

# _emphasis_
EMPHASIS_RE = r'(_)([^_]+)\1'

# __strong__
STRONG_RE = r'(_{2})(.+?)\1'

# __strong_em___
STRONG_EM_RE = r'(_)\1(?!\1)([^_]+?)\1(?!\1)(.+?)\1{3}'


class LegacyUnderscoreProcessor(UnderscoreProcessor):
    """Emphasis processor for handling strong and em matches inside underscores.

    Overrides PATTERNS so that underscores inside words keep their pre-3.0
    (legacy) emphasis behavior. Pattern order matters: more specific
    combined forms are tried before the simple strong/em forms.
    """

    PATTERNS = [
        EmStrongItem(re.compile(EM_STRONG2_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
        EmStrongItem(re.compile(STRONG_EM2_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
        EmStrongItem(re.compile(STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
        EmStrongItem(re.compile(STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
        EmStrongItem(re.compile(EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
    ]


class LegacyEmExtension(Extension):
    """ Add legacy_em extension to Markdown class."""

    def extendMarkdown(self, md):
        """ Modify inline patterns: replace the default underscore processor. """
        md.inlinePatterns.register(LegacyUnderscoreProcessor(r'_'), 'em_strong2', 50)


def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of the LegacyEmExtension """
    return LegacyEmExtension(**kwargs)
from . import Extension
from ..blockprocessors import BlockProcessor
from ..preprocessors import Preprocessor
from ..postprocessors import RawHtmlPostprocessor
from .. import util
from ..htmlparser import HTMLExtractor, blank_line_re
import xml.etree.ElementTree as etree


class HTMLExtractorExtra(HTMLExtractor):
    """
    Override HTMLExtractor and create etree Elements for any elements which should have content parsed as Markdown.
    """

    def __init__(self, md, *args, **kwargs):
        # All block-level tags.
        self.block_level_tags = set(md.block_level_elements.copy())
        # Block-level tags in which the content only gets span level parsing
        self.span_tags = set(
            ['address', 'dd', 'dt', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'legend', 'li', 'p', 'summary', 'td', 'th']
        )
        # Block-level tags which never get their content parsed.
        self.raw_tags = set(['canvas', 'math', 'option', 'pre', 'script', 'style', 'textarea'])

        super().__init__(md, *args, **kwargs)

        # Block-level tags in which the content gets parsed as blocks.
        # Computed after super().__init__ because empty_tags is set there.
        self.block_tags = set(self.block_level_tags) - (self.span_tags | self.raw_tags | self.empty_tags)
        self.span_and_blocks_tags = self.block_tags | self.span_tags

    def reset(self):
        """Reset this instance. Loses all unprocessed data."""
        self.mdstack = []  # When markdown=1, stack contains a list of tags
        self.treebuilder = etree.TreeBuilder()
        self.mdstate = []  # one of 'block', 'span', 'off', or None
        super().reset()

    def close(self):
        """Handle any buffered data."""
        super().close()
        # Handle any unclosed tags.
        if self.mdstack:
            # Close the outermost parent. handle_endtag will close all unclosed children.
            self.handle_endtag(self.mdstack[0])

    def get_element(self):
        """ Return element from treebuilder and reset treebuilder for later use. """
        element = self.treebuilder.close()
        self.treebuilder = etree.TreeBuilder()
        return element

    def get_state(self, tag, attrs):
        """ Return state from tag and `markdown` attr. One of 'block', 'span', or 'off'. """
        md_attr = attrs.get('markdown', '0')
        if md_attr == 'markdown':
            # `<tag markdown>` is the same as `<tag markdown='1'>`.
            md_attr = '1'
        parent_state = self.mdstate[-1] if self.mdstate else None
        if parent_state == 'off' or (parent_state == 'span' and md_attr != '0'):
            # Only use the parent state if it is more restrictive than the markdown attribute.
            md_attr = parent_state
        if ((md_attr == '1' and tag in self.block_tags) or
                (md_attr == 'block' and tag in self.span_and_blocks_tags)):
            return 'block'
        elif ((md_attr == '1' and tag in self.span_tags) or
                (md_attr == 'span' and tag in self.span_and_blocks_tags)):
            return 'span'
        elif tag in self.block_level_tags:
            return 'off'
        else:  # pragma: no cover
            return None
    def handle_endtag(self, tag):
        """ Close a tag, stashing a completed markdown-enabled block if done. """
        if tag in self.block_level_tags:
            if self.inraw:
                super().handle_endtag(tag)
            elif tag in self.mdstack:
                # Close element and any unclosed children
                while self.mdstack:
                    item = self.mdstack.pop()
                    self.mdstate.pop()
                    self.treebuilder.end(item)
                    if item == tag:
                        break
                if not self.mdstack:
                    # Last item in stack is closed. Stash it
                    element = self.get_element()
                    # Get last entry to see if it ends in newlines
                    # If it is an element, assume there is no newlines
                    item = self.cleandoc[-1] if self.cleandoc else ''
                    # If we only have one newline before block element, add another
                    if not item.endswith('\n\n') and item.endswith('\n'):
                        self.cleandoc.append('\n')
                    self.cleandoc.append(self.md.htmlStash.store(element))
                    self.cleandoc.append('\n\n')
                    self.state = []
                    # Check if element has a tail
                    if not blank_line_re.match(
                            self.rawdata[self.line_offset + self.offset + len(self.get_endtag_text(tag)):]):
                        # More content exists after endtag.
                        self.intail = True
            else:
                # Treat orphan closing tag as a span level tag.
                text = self.get_endtag_text(tag)
                if self.mdstate and self.mdstate[-1] == "off":
                    self.handle_data(self.md.htmlStash.store(text))
                else:
                    self.handle_data(text)
        else:
            # Span level tag
            if self.inraw:
                super().handle_endtag(tag)
            else:
                text = self.get_endtag_text(tag)
                if self.mdstate and self.mdstate[-1] == "off":
                    self.handle_data(self.md.htmlStash.store(text))
                else:
                    self.handle_data(text)

    def handle_startendtag(self, tag, attrs):
        """ Handle a self-closing tag; strip any `markdown` attribute first. """
        if tag in self.empty_tags:
            attrs = {key: value if value is not None else key for key, value in attrs}
            if "markdown" in attrs:
                attrs.pop('markdown')
                element = etree.Element(tag, attrs)
                data = etree.tostring(element, encoding='unicode', method='html')
            else:
                data = self.get_starttag_text()
        else:
            data = self.get_starttag_text()
        self.handle_empty_tag(data, is_block=self.md.is_block_level(tag))

    def handle_data(self, data):
        """ Route character data into the treebuilder when inside markdown=1. """
        if self.intail and '\n' in data:
            self.intail = False
        if self.inraw or not self.mdstack:
            super().handle_data(data)
        else:
            self.treebuilder.data(data)

    def handle_empty_tag(self, data, is_block):
        """ Stash an empty tag; block-level ones get surrounding newlines. """
        if self.inraw or not self.mdstack:
            super().handle_empty_tag(data, is_block)
        else:
            if self.at_line_start() and is_block:
                self.handle_data('\n' + self.md.htmlStash.store(data) + '\n\n')
            else:
                self.handle_data(self.md.htmlStash.store(data))

    def parse_pi(self, i):
        """ Parse a processing instruction only where a raw block may begin. """
        if self.at_line_start() or self.intail or self.mdstack:
            # The same override exists in HTMLExtractor without the check
            # for mdstack. Therefore, use HTMLExtractor's parent instead.
            return super(HTMLExtractor, self).parse_pi(i)
        # This is not the beginning of a raw block so treat as plain data
        # and avoid consuming any tags which may follow (see #1066).
        self.handle_data('<?')
        return i + 2
class HtmlBlockPreprocessor(Preprocessor):
    """Extract raw HTML blocks from the text and stash them for later retrieval."""

    def run(self, lines):
        """ Feed the document through HTMLExtractorExtra and return the cleaned lines. """
        extractor = HTMLExtractorExtra(self.md)
        extractor.feed('\n'.join(lines))
        extractor.close()
        cleaned = ''.join(extractor.cleandoc)
        return cleaned.split('\n')


class MarkdownInHtmlProcessor(BlockProcessor):
    """Process Markdown Inside HTML Blocks which have been stored in the HtmlStash."""

    def test(self, parent, block):
        # Always return True. `run` will return `False` if not a valid match.
        return True
+ # Save the position of each item to be inserted later in reverse. + tails = [] + for pos, child in enumerate(element): + if child.tail: + block = child.tail.rstrip('\n') + child.tail = '' + # Use a dummy placeholder element. + dummy = etree.Element('div') + self.parser.parseBlocks(dummy, block.split('\n\n')) + children = list(dummy) + children.reverse() + tails.append((pos + 1, children)) + + # Insert the elements created from the tails in reverse. + tails.reverse() + for pos, tail in tails: + for item in tail: + element.insert(pos, item) + + # Parse Markdown text content. Do this last to avoid raw HTML parsing. + if element.text: + block = element.text.rstrip('\n') + element.text = '' + # Use a dummy placeholder element as the content needs to get inserted before existing children. + dummy = etree.Element('div') + self.parser.parseBlocks(dummy, block.split('\n\n')) + children = list(dummy) + children.reverse() + for child in children: + element.insert(0, child) + + elif md_attr == 'span': + # Span level parsing will be handled by inlineprocessors. + # Walk children here to remove any `markdown` attributes. + for child in list(element): + self.parse_element_content(child) + + else: + # Disable inline parsing for everything else + if element.text is None: + element.text = '' + element.text = util.AtomicString(element.text) + for child in list(element): + self.parse_element_content(child) + if child.tail: + child.tail = util.AtomicString(child.tail) + + def run(self, parent, blocks): + m = util.HTML_PLACEHOLDER_RE.match(blocks[0]) + if m: + index = int(m.group(1)) + element = self.parser.md.htmlStash.rawHtmlBlocks[index] + if isinstance(element, etree.Element): + # We have a matched element. Process it. + blocks.pop(0) + self.parse_element_content(element) + parent.append(element) + # Cleanup stash. Replace element with empty string to avoid confusing postprocessor. 
+ self.parser.md.htmlStash.rawHtmlBlocks.pop(index) + self.parser.md.htmlStash.rawHtmlBlocks.insert(index, '') + # Confirm the match to the blockparser. + return True + # No match found. + return False + + +class MarkdownInHTMLPostprocessor(RawHtmlPostprocessor): + def stash_to_string(self, text): + """ Override default to handle any etree elements still in the stash. """ + if isinstance(text, etree.Element): + return self.md.serializer(text) + else: + return str(text) + + +class MarkdownInHtmlExtension(Extension): + """Add Markdown parsing in HTML to Markdown class.""" + + def extendMarkdown(self, md): + """ Register extension instances. """ + + # Replace raw HTML preprocessor + md.preprocessors.register(HtmlBlockPreprocessor(md), 'html_block', 20) + # Add blockprocessor which handles the placeholders for etree elements + md.parser.blockprocessors.register( + MarkdownInHtmlProcessor(md.parser), 'markdown_block', 105 + ) + # Replace raw HTML postprocessor + md.postprocessors.register(MarkdownInHTMLPostprocessor(md), 'raw_html', 30) + + +def makeExtension(**kwargs): # pragma: no cover + return MarkdownInHtmlExtension(**kwargs) diff --git a/SSG/markdown/extensions/meta.py b/SSG/markdown/extensions/meta.py new file mode 100644 index 0000000..10dee11 --- /dev/null +++ b/SSG/markdown/extensions/meta.py @@ -0,0 +1,79 @@ +""" +Meta Data Extension for Python-Markdown +======================================= + +This extension adds Meta Data handling to markdown. + +See <https://Python-Markdown.github.io/extensions/meta_data> +for documentation. + +Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com). + +All changes Copyright 2008-2014 The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +""" + +from . 
import Extension +from ..preprocessors import Preprocessor +import re +import logging + +log = logging.getLogger('MARKDOWN') + +# Global Vars +META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)') +META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)') +BEGIN_RE = re.compile(r'^-{3}(\s.*)?') +END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?') + + +class MetaExtension (Extension): + """ Meta-Data extension for Python-Markdown. """ + + def extendMarkdown(self, md): + """ Add MetaPreprocessor to Markdown instance. """ + md.registerExtension(self) + self.md = md + md.preprocessors.register(MetaPreprocessor(md), 'meta', 27) + + def reset(self): + self.md.Meta = {} + + +class MetaPreprocessor(Preprocessor): + """ Get Meta-Data. """ + + def run(self, lines): + """ Parse Meta-Data and store in Markdown.Meta. """ + meta = {} + key = None + if lines and BEGIN_RE.match(lines[0]): + lines.pop(0) + while lines: + line = lines.pop(0) + m1 = META_RE.match(line) + if line.strip() == '' or END_RE.match(line): + break # blank line or end of YAML header - done + if m1: + key = m1.group('key').lower().strip() + value = m1.group('value').strip() + try: + meta[key].append(value) + except KeyError: + meta[key] = [value] + else: + m2 = META_MORE_RE.match(line) + if m2 and key: + # Add another line to existing key + meta[key].append(m2.group('value').strip()) + else: + lines.insert(0, line) + break # no meta data - done + self.md.Meta = meta + return lines + + +def makeExtension(**kwargs): # pragma: no cover + return MetaExtension(**kwargs) diff --git a/SSG/markdown/extensions/nl2br.py b/SSG/markdown/extensions/nl2br.py new file mode 100644 index 0000000..6c7491b --- /dev/null +++ b/SSG/markdown/extensions/nl2br.py @@ -0,0 +1,33 @@ +""" +NL2BR Extension +=============== + +A Python-Markdown extension to treat newlines as hard breaks; like +GitHub-flavored Markdown does. + +See <https://Python-Markdown.github.io/extensions/nl2br> +for documentation. 
+ +Original code Copyright 2011 [Brian Neal](https://deathofagremmie.com/) + +All changes Copyright 2011-2014 The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +""" + +from . import Extension +from ..inlinepatterns import SubstituteTagInlineProcessor + +BR_RE = r'\n' + + +class Nl2BrExtension(Extension): + + def extendMarkdown(self, md): + br_tag = SubstituteTagInlineProcessor(BR_RE, 'br') + md.inlinePatterns.register(br_tag, 'nl', 5) + + +def makeExtension(**kwargs): # pragma: no cover + return Nl2BrExtension(**kwargs) diff --git a/SSG/markdown/extensions/sane_lists.py b/SSG/markdown/extensions/sane_lists.py new file mode 100644 index 0000000..e27eb18 --- /dev/null +++ b/SSG/markdown/extensions/sane_lists.py @@ -0,0 +1,54 @@ +""" +Sane List Extension for Python-Markdown +======================================= + +Modify the behavior of Lists in Python-Markdown to act in a sane manner. + +See <https://Python-Markdown.github.io/extensions/sane_lists> +for documentation. + +Original code Copyright 2011 [Waylan Limberg](http://achinghead.com) + +All changes Copyright 2011-2014 The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +""" + +from . import Extension +from ..blockprocessors import OListProcessor, UListProcessor +import re + + +class SaneOListProcessor(OListProcessor): + + SIBLING_TAGS = ['ol'] + LAZY_OL = False + + def __init__(self, parser): + super().__init__(parser) + self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.))[ ]+(.*)' % + (self.tab_length - 1)) + + +class SaneUListProcessor(UListProcessor): + + SIBLING_TAGS = ['ul'] + + def __init__(self, parser): + super().__init__(parser) + self.CHILD_RE = re.compile(r'^[ ]{0,%d}(([*+-]))[ ]+(.*)' % + (self.tab_length - 1)) + + +class SaneListExtension(Extension): + """ Add sane lists to Markdown. """ + + def extendMarkdown(self, md): + """ Override existing Processors. 
""" + md.parser.blockprocessors.register(SaneOListProcessor(md.parser), 'olist', 40) + md.parser.blockprocessors.register(SaneUListProcessor(md.parser), 'ulist', 30) + + +def makeExtension(**kwargs): # pragma: no cover + return SaneListExtension(**kwargs) diff --git a/SSG/markdown/extensions/smarty.py b/SSG/markdown/extensions/smarty.py new file mode 100644 index 0000000..c4bfd58 --- /dev/null +++ b/SSG/markdown/extensions/smarty.py @@ -0,0 +1,257 @@ +''' +Smarty extension for Python-Markdown +==================================== + +Adds conversion of ASCII dashes, quotes and ellipses to their HTML +entity equivalents. + +See <https://Python-Markdown.github.io/extensions/smarty> +for documentation. + +Author: 2013, Dmitry Shachnev <mitya57@gmail.com> + +All changes Copyright 2013-2014 The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +SmartyPants license: + + Copyright (c) 2003 John Gruber <https://daringfireball.net/> + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + * Neither the name "SmartyPants" nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + This software is provided by the copyright holders and contributors "as + is" and any express or implied warranties, including, but not limited + to, the implied warranties of merchantability and fitness for a + particular purpose are disclaimed. 
In no event shall the copyright + owner or contributors be liable for any direct, indirect, incidental, + special, exemplary, or consequential damages (including, but not + limited to, procurement of substitute goods or services; loss of use, + data, or profits; or business interruption) however caused and on any + theory of liability, whether in contract, strict liability, or tort + (including negligence or otherwise) arising in any way out of the use + of this software, even if advised of the possibility of such damage. + + +smartypants.py license: + + smartypants.py is a derivative work of SmartyPants. + Copyright (c) 2004, 2007 Chad Miller <http://web.chad.org/> + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + This software is provided by the copyright holders and contributors "as + is" and any express or implied warranties, including, but not limited + to, the implied warranties of merchantability and fitness for a + particular purpose are disclaimed. In no event shall the copyright + owner or contributors be liable for any direct, indirect, incidental, + special, exemplary, or consequential damages (including, but not + limited to, procurement of substitute goods or services; loss of use, + data, or profits; or business interruption) however caused and on any + theory of liability, whether in contract, strict liability, or tort + (including negligence or otherwise) arising in any way out of the use + of this software, even if advised of the possibility of such damage. + +''' + + +from . 
import Extension +from ..inlinepatterns import HtmlInlineProcessor, HTML_RE +from ..treeprocessors import InlineProcessor +from ..util import Registry + + +# Constants for quote education. +punctClass = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]""" +endOfWordClass = r"[\s.,;:!?)]" +closeClass = r"[^\ \t\r\n\[\{\(\-\u0002\u0003]" + +openingQuotesBase = ( + r'(\s' # a whitespace char + r'| ' # or a non-breaking space entity + r'|--' # or dashes + r'|–|—' # or unicode + r'|&[mn]dash;' # or named dash entities + r'|–|—' # or decimal entities + r')' +) + +substitutions = { + 'mdash': '—', + 'ndash': '–', + 'ellipsis': '…', + 'left-angle-quote': '«', + 'right-angle-quote': '»', + 'left-single-quote': '‘', + 'right-single-quote': '’', + 'left-double-quote': '“', + 'right-double-quote': '”', +} + + +# Special case if the very first character is a quote +# followed by punctuation at a non-word-break. Close the quotes by brute force: +singleQuoteStartRe = r"^'(?=%s\B)" % punctClass +doubleQuoteStartRe = r'^"(?=%s\B)' % punctClass + +# Special case for double sets of quotes, e.g.: +# <p>He said, "'Quoted' words in a larger quote."</p> +doubleQuoteSetsRe = r""""'(?=\w)""" +singleQuoteSetsRe = r"""'"(?=\w)""" + +# Special case for decade abbreviations (the '80s): +decadeAbbrRe = r"(?<!\w)'(?=\d{2}s)" + +# Get most opening double quotes: +openingDoubleQuotesRegex = r'%s"(?=\w)' % openingQuotesBase + +# Double closing quotes: +closingDoubleQuotesRegex = r'"(?=\s)' +closingDoubleQuotesRegex2 = '(?<=%s)"' % closeClass + +# Get most opening single quotes: +openingSingleQuotesRegex = r"%s'(?=\w)" % openingQuotesBase + +# Single closing quotes: +closingSingleQuotesRegex = r"(?<=%s)'(?!\s|s\b|\d)" % closeClass +closingSingleQuotesRegex2 = r"(?<=%s)'(\s|s\b)" % closeClass + +# All remaining quotes should be opening ones +remainingSingleQuotesRegex = r"'" +remainingDoubleQuotesRegex = r'"' + +HTML_STRICT_RE = HTML_RE + r'(?!\>)' + + +class SubstituteTextPattern(HtmlInlineProcessor): + 
def __init__(self, pattern, replace, md): + """ Replaces matches with some text. """ + HtmlInlineProcessor.__init__(self, pattern) + self.replace = replace + self.md = md + + def handleMatch(self, m, data): + result = '' + for part in self.replace: + if isinstance(part, int): + result += m.group(part) + else: + result += self.md.htmlStash.store(part) + return result, m.start(0), m.end(0) + + +class SmartyExtension(Extension): + def __init__(self, **kwargs): + self.config = { + 'smart_quotes': [True, 'Educate quotes'], + 'smart_angled_quotes': [False, 'Educate angled quotes'], + 'smart_dashes': [True, 'Educate dashes'], + 'smart_ellipses': [True, 'Educate ellipses'], + 'substitutions': [{}, 'Overwrite default substitutions'], + } + super().__init__(**kwargs) + self.substitutions = dict(substitutions) + self.substitutions.update(self.getConfig('substitutions', default={})) + + def _addPatterns(self, md, patterns, serie, priority): + for ind, pattern in enumerate(patterns): + pattern += (md,) + pattern = SubstituteTextPattern(*pattern) + name = 'smarty-%s-%d' % (serie, ind) + self.inlinePatterns.register(pattern, name, priority-ind) + + def educateDashes(self, md): + emDashesPattern = SubstituteTextPattern( + r'(?<!-)---(?!-)', (self.substitutions['mdash'],), md + ) + enDashesPattern = SubstituteTextPattern( + r'(?<!-)--(?!-)', (self.substitutions['ndash'],), md + ) + self.inlinePatterns.register(emDashesPattern, 'smarty-em-dashes', 50) + self.inlinePatterns.register(enDashesPattern, 'smarty-en-dashes', 45) + + def educateEllipses(self, md): + ellipsesPattern = SubstituteTextPattern( + r'(?<!\.)\.{3}(?!\.)', (self.substitutions['ellipsis'],), md + ) + self.inlinePatterns.register(ellipsesPattern, 'smarty-ellipses', 10) + + def educateAngledQuotes(self, md): + leftAngledQuotePattern = SubstituteTextPattern( + r'\<\<', (self.substitutions['left-angle-quote'],), md + ) + rightAngledQuotePattern = SubstituteTextPattern( + r'\>\>', 
(self.substitutions['right-angle-quote'],), md + ) + self.inlinePatterns.register(leftAngledQuotePattern, 'smarty-left-angle-quotes', 40) + self.inlinePatterns.register(rightAngledQuotePattern, 'smarty-right-angle-quotes', 35) + + def educateQuotes(self, md): + lsquo = self.substitutions['left-single-quote'] + rsquo = self.substitutions['right-single-quote'] + ldquo = self.substitutions['left-double-quote'] + rdquo = self.substitutions['right-double-quote'] + patterns = ( + (singleQuoteStartRe, (rsquo,)), + (doubleQuoteStartRe, (rdquo,)), + (doubleQuoteSetsRe, (ldquo + lsquo,)), + (singleQuoteSetsRe, (lsquo + ldquo,)), + (decadeAbbrRe, (rsquo,)), + (openingSingleQuotesRegex, (1, lsquo)), + (closingSingleQuotesRegex, (rsquo,)), + (closingSingleQuotesRegex2, (rsquo, 1)), + (remainingSingleQuotesRegex, (lsquo,)), + (openingDoubleQuotesRegex, (1, ldquo)), + (closingDoubleQuotesRegex, (rdquo,)), + (closingDoubleQuotesRegex2, (rdquo,)), + (remainingDoubleQuotesRegex, (ldquo,)) + ) + self._addPatterns(md, patterns, 'quotes', 30) + + def extendMarkdown(self, md): + configs = self.getConfigs() + self.inlinePatterns = Registry() + if configs['smart_ellipses']: + self.educateEllipses(md) + if configs['smart_quotes']: + self.educateQuotes(md) + if configs['smart_angled_quotes']: + self.educateAngledQuotes(md) + # Override HTML_RE from inlinepatterns.py so that it does not + # process tags with duplicate closing quotes. 
+ md.inlinePatterns.register(HtmlInlineProcessor(HTML_STRICT_RE, md), 'html', 90) + if configs['smart_dashes']: + self.educateDashes(md) + inlineProcessor = InlineProcessor(md) + inlineProcessor.inlinePatterns = self.inlinePatterns + md.treeprocessors.register(inlineProcessor, 'smarty', 2) + md.ESCAPED_CHARS.extend(['"', "'"]) + + +def makeExtension(**kwargs): # pragma: no cover + return SmartyExtension(**kwargs) diff --git a/SSG/markdown/extensions/tables.py b/SSG/markdown/extensions/tables.py new file mode 100644 index 0000000..c8b1024 --- /dev/null +++ b/SSG/markdown/extensions/tables.py @@ -0,0 +1,236 @@ +""" +Tables Extension for Python-Markdown +==================================== + +Added parsing of tables to Python-Markdown. + +See <https://Python-Markdown.github.io/extensions/tables> +for documentation. + +Original code Copyright 2009 [Waylan Limberg](http://achinghead.com) + +All changes Copyright 2008-2014 The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +""" + +from . import Extension +from ..blockprocessors import BlockProcessor +import xml.etree.ElementTree as etree +import re +PIPE_NONE = 0 +PIPE_LEFT = 1 +PIPE_RIGHT = 2 + + +class TableProcessor(BlockProcessor): + """ Process Tables. """ + + RE_CODE_PIPES = re.compile(r'(?:(\\\\)|(\\`+)|(`+)|(\\\|)|(\|))') + RE_END_BORDER = re.compile(r'(?<!\\)(?:\\\\)*\|$') + + def __init__(self, parser, config): + self.border = False + self.separator = '' + self.config = config + + super().__init__(parser) + + def test(self, parent, block): + """ + Ensure first two rows (column header and separator row) are valid table rows. + + Keep border check and separator row to avoid repeating the work. 
+ """ + is_table = False + rows = [row.strip(' ') for row in block.split('\n')] + if len(rows) > 1: + header0 = rows[0] + self.border = PIPE_NONE + if header0.startswith('|'): + self.border |= PIPE_LEFT + if self.RE_END_BORDER.search(header0) is not None: + self.border |= PIPE_RIGHT + row = self._split_row(header0) + row0_len = len(row) + is_table = row0_len > 1 + + # Each row in a single column table needs at least one pipe. + if not is_table and row0_len == 1 and self.border: + for index in range(1, len(rows)): + is_table = rows[index].startswith('|') + if not is_table: + is_table = self.RE_END_BORDER.search(rows[index]) is not None + if not is_table: + break + + if is_table: + row = self._split_row(rows[1]) + is_table = (len(row) == row0_len) and set(''.join(row)) <= set('|:- ') + if is_table: + self.separator = row + + return is_table + + def run(self, parent, blocks): + """ Parse a table block and build table. """ + block = blocks.pop(0).split('\n') + header = block[0].strip(' ') + rows = [] if len(block) < 3 else block[2:] + + # Get alignment of columns + align = [] + for c in self.separator: + c = c.strip(' ') + if c.startswith(':') and c.endswith(':'): + align.append('center') + elif c.startswith(':'): + align.append('left') + elif c.endswith(':'): + align.append('right') + else: + align.append(None) + + # Build table + table = etree.SubElement(parent, 'table') + thead = etree.SubElement(table, 'thead') + self._build_row(header, thead, align) + tbody = etree.SubElement(table, 'tbody') + if len(rows) == 0: + # Handle empty table + self._build_empty_row(tbody, align) + else: + for row in rows: + self._build_row(row.strip(' '), tbody, align) + + def _build_empty_row(self, parent, align): + """Build an empty row.""" + tr = etree.SubElement(parent, 'tr') + count = len(align) + while count: + etree.SubElement(tr, 'td') + count -= 1 + + def _build_row(self, row, parent, align): + """ Given a row of text, build table cells. 
""" + tr = etree.SubElement(parent, 'tr') + tag = 'td' + if parent.tag == 'thead': + tag = 'th' + cells = self._split_row(row) + # We use align here rather than cells to ensure every row + # contains the same number of columns. + for i, a in enumerate(align): + c = etree.SubElement(tr, tag) + try: + c.text = cells[i].strip(' ') + except IndexError: # pragma: no cover + c.text = "" + if a: + if self.config['use_align_attribute']: + c.set('align', a) + else: + c.set('style', f'text-align: {a};') + + def _split_row(self, row): + """ split a row of text into list of cells. """ + if self.border: + if row.startswith('|'): + row = row[1:] + row = self.RE_END_BORDER.sub('', row) + return self._split(row) + + def _split(self, row): + """ split a row of text with some code into a list of cells. """ + elements = [] + pipes = [] + tics = [] + tic_points = [] + tic_region = [] + good_pipes = [] + + # Parse row + # Throw out \\, and \| + for m in self.RE_CODE_PIPES.finditer(row): + # Store ` data (len, start_pos, end_pos) + if m.group(2): + # \`+ + # Store length of each tic group: subtract \ + tics.append(len(m.group(2)) - 1) + # Store start of group, end of group, and escape length + tic_points.append((m.start(2), m.end(2) - 1, 1)) + elif m.group(3): + # `+ + # Store length of each tic group + tics.append(len(m.group(3))) + # Store start of group, end of group, and escape length + tic_points.append((m.start(3), m.end(3) - 1, 0)) + # Store pipe location + elif m.group(5): + pipes.append(m.start(5)) + + # Pair up tics according to size if possible + # Subtract the escape length *only* from the opening. + # Walk through tic list and see if tic has a close. + # Store the tic region (start of region, end of region). 
+ pos = 0 + tic_len = len(tics) + while pos < tic_len: + try: + tic_size = tics[pos] - tic_points[pos][2] + if tic_size == 0: + raise ValueError + index = tics[pos + 1:].index(tic_size) + 1 + tic_region.append((tic_points[pos][0], tic_points[pos + index][1])) + pos += index + 1 + except ValueError: + pos += 1 + + # Resolve pipes. Check if they are within a tic pair region. + # Walk through pipes comparing them to each region. + # - If pipe position is less that a region, it isn't in a region + # - If it is within a region, we don't want it, so throw it out + # - If we didn't throw it out, it must be a table pipe + for pipe in pipes: + throw_out = False + for region in tic_region: + if pipe < region[0]: + # Pipe is not in a region + break + elif region[0] <= pipe <= region[1]: + # Pipe is within a code region. Throw it out. + throw_out = True + break + if not throw_out: + good_pipes.append(pipe) + + # Split row according to table delimiters. + pos = 0 + for pipe in good_pipes: + elements.append(row[pos:pipe]) + pos = pipe + 1 + elements.append(row[pos:]) + return elements + + +class TableExtension(Extension): + """ Add tables to Markdown. """ + + def __init__(self, **kwargs): + self.config = { + 'use_align_attribute': [False, 'True to use align attribute instead of style.'], + } + + super().__init__(**kwargs) + + def extendMarkdown(self, md): + """ Add an instance of TableProcessor to BlockParser. 
""" + if '|' not in md.ESCAPED_CHARS: + md.ESCAPED_CHARS.append('|') + processor = TableProcessor(md.parser, self.getConfigs()) + md.parser.blockprocessors.register(processor, 'table', 75) + + +def makeExtension(**kwargs): # pragma: no cover + return TableExtension(**kwargs) diff --git a/SSG/markdown/extensions/toc.py b/SSG/markdown/extensions/toc.py new file mode 100644 index 0000000..1ded18d --- /dev/null +++ b/SSG/markdown/extensions/toc.py @@ -0,0 +1,384 @@ +""" +Table of Contents Extension for Python-Markdown +=============================================== + +See <https://Python-Markdown.github.io/extensions/toc> +for documentation. + +Oringinal code Copyright 2008 [Jack Miller](https://codezen.org/) + +All changes Copyright 2008-2014 The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +""" + +from . import Extension +from ..treeprocessors import Treeprocessor +from ..util import code_escape, parseBoolValue, AMP_SUBSTITUTE, HTML_PLACEHOLDER_RE, AtomicString +from ..treeprocessors import UnescapeTreeprocessor +import re +import html +import unicodedata +import xml.etree.ElementTree as etree + + +def slugify(value, separator, unicode=False): + """ Slugify a string, to make it URL friendly. """ + if not unicode: + # Replace Extended Latin characters with ASCII, i.e. žlutý → zluty + value = unicodedata.normalize('NFKD', value) + value = value.encode('ascii', 'ignore').decode('ascii') + value = re.sub(r'[^\w\s-]', '', value).strip().lower() + return re.sub(r'[{}\s]+'.format(separator), separator, value) + + +def slugify_unicode(value, separator): + """ Slugify a string, to make it URL friendly while preserving Unicode characters. """ + return slugify(value, separator, unicode=True) + + +IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$') + + +def unique(id, ids): + """ Ensure id is unique in set of ids. Append '_1', '_2'... 
if not """ + while id in ids or not id: + m = IDCOUNT_RE.match(id) + if m: + id = '%s_%d' % (m.group(1), int(m.group(2))+1) + else: + id = '%s_%d' % (id, 1) + ids.add(id) + return id + + +def get_name(el): + """Get title name.""" + + text = [] + for c in el.itertext(): + if isinstance(c, AtomicString): + text.append(html.unescape(c)) + else: + text.append(c) + return ''.join(text).strip() + + +def stashedHTML2text(text, md, strip_entities=True): + """ Extract raw HTML from stash, reduce to plain text and swap with placeholder. """ + def _html_sub(m): + """ Substitute raw html with plain text. """ + try: + raw = md.htmlStash.rawHtmlBlocks[int(m.group(1))] + except (IndexError, TypeError): # pragma: no cover + return m.group(0) + # Strip out tags and/or entities - leaving text + res = re.sub(r'(<[^>]+>)', '', raw) + if strip_entities: + res = re.sub(r'(&[\#a-zA-Z0-9]+;)', '', res) + return res + + return HTML_PLACEHOLDER_RE.sub(_html_sub, text) + + +def unescape(text): + """ Unescape escaped text. """ + c = UnescapeTreeprocessor() + return c.unescape(text) + + +def nest_toc_tokens(toc_list): + """Given an unsorted list with errors and skips, return a nested one. 
+ [{'level': 1}, {'level': 2}] + => + [{'level': 1, 'children': [{'level': 2, 'children': []}]}] + + A wrong list is also converted: + [{'level': 2}, {'level': 1}] + => + [{'level': 2, 'children': []}, {'level': 1, 'children': []}] + """ + + ordered_list = [] + if len(toc_list): + # Initialize everything by processing the first entry + last = toc_list.pop(0) + last['children'] = [] + levels = [last['level']] + ordered_list.append(last) + parents = [] + + # Walk the rest nesting the entries properly + while toc_list: + t = toc_list.pop(0) + current_level = t['level'] + t['children'] = [] + + # Reduce depth if current level < last item's level + if current_level < levels[-1]: + # Pop last level since we know we are less than it + levels.pop() + + # Pop parents and levels we are less than or equal to + to_pop = 0 + for p in reversed(parents): + if current_level <= p['level']: + to_pop += 1 + else: # pragma: no cover + break + if to_pop: + levels = levels[:-to_pop] + parents = parents[:-to_pop] + + # Note current level as last + levels.append(current_level) + + # Level is the same, so append to + # the current parent (if available) + if current_level == levels[-1]: + (parents[-1]['children'] if parents + else ordered_list).append(t) + + # Current level is > last item's level, + # So make last item a parent and append current as child + else: + last['children'].append(t) + parents.append(last) + levels.append(current_level) + last = t + + return ordered_list + + +class TocTreeprocessor(Treeprocessor): + def __init__(self, md, config): + super().__init__(md) + + self.marker = config["marker"] + self.title = config["title"] + self.base_level = int(config["baselevel"]) - 1 + self.slugify = config["slugify"] + self.sep = config["separator"] + self.toc_class = config["toc_class"] + self.use_anchors = parseBoolValue(config["anchorlink"]) + self.anchorlink_class = config["anchorlink_class"] + self.use_permalinks = parseBoolValue(config["permalink"], False) + if 
self.use_permalinks is None: + self.use_permalinks = config["permalink"] + self.permalink_class = config["permalink_class"] + self.permalink_title = config["permalink_title"] + self.header_rgx = re.compile("[Hh][123456]") + if isinstance(config["toc_depth"], str) and '-' in config["toc_depth"]: + self.toc_top, self.toc_bottom = [int(x) for x in config["toc_depth"].split('-')] + else: + self.toc_top = 1 + self.toc_bottom = int(config["toc_depth"]) + + def iterparent(self, node): + ''' Iterator wrapper to get allowed parent and child all at once. ''' + + # We do not allow the marker inside a header as that + # would cause an endless loop of placing a new TOC + # inside previously generated TOC. + for child in node: + if not self.header_rgx.match(child.tag) and child.tag not in ['pre', 'code']: + yield node, child + yield from self.iterparent(child) + + def replace_marker(self, root, elem): + ''' Replace marker with elem. ''' + for (p, c) in self.iterparent(root): + text = ''.join(c.itertext()).strip() + if not text: + continue + + # To keep the output from screwing up the + # validation by putting a <div> inside of a <p> + # we actually replace the <p> in its entirety. + + # The <p> element may contain more than a single text content + # (nl2br can introduce a <br>). In this situation, c.text returns + # the very first content, ignore children contents or tail content. + # len(c) == 0 is here to ensure there is only text in the <p>. + if c.text and c.text.strip() == self.marker and len(c) == 0: + for i in range(len(p)): + if p[i] == c: + p[i] = elem + break + + def set_level(self, elem): + ''' Adjust header level according to base level. 
''' + level = int(elem.tag[-1]) + self.base_level + if level > 6: + level = 6 + elem.tag = 'h%d' % level + + def add_anchor(self, c, elem_id): # @ReservedAssignment + anchor = etree.Element("a") + anchor.text = c.text + anchor.attrib["href"] = "#" + elem_id + anchor.attrib["class"] = self.anchorlink_class + c.text = "" + for elem in c: + anchor.append(elem) + while len(c): + c.remove(c[0]) + c.append(anchor) + + def add_permalink(self, c, elem_id): + permalink = etree.Element("a") + permalink.text = ("%spara;" % AMP_SUBSTITUTE + if self.use_permalinks is True + else self.use_permalinks) + permalink.attrib["href"] = "#" + elem_id + permalink.attrib["class"] = self.permalink_class + if self.permalink_title: + permalink.attrib["title"] = self.permalink_title + c.append(permalink) + + def build_toc_div(self, toc_list): + """ Return a string div given a toc list. """ + div = etree.Element("div") + div.attrib["class"] = self.toc_class + + # Add title to the div + if self.title: + header = etree.SubElement(div, "span") + header.attrib["class"] = "toctitle" + header.text = self.title + + def build_etree_ul(toc_list, parent): + ul = etree.SubElement(parent, "ul") + for item in toc_list: + # List item link, to be inserted into the toc div + li = etree.SubElement(ul, "li") + link = etree.SubElement(li, "a") + link.text = item.get('name', '') + link.attrib["href"] = '#' + item.get('id', '') + if item['children']: + build_etree_ul(item['children'], li) + return ul + + build_etree_ul(toc_list, div) + + if 'prettify' in self.md.treeprocessors: + self.md.treeprocessors['prettify'].run(div) + + return div + + def run(self, doc): + # Get a list of id attributes + used_ids = set() + for el in doc.iter(): + if "id" in el.attrib: + used_ids.add(el.attrib["id"]) + + toc_tokens = [] + for el in doc.iter(): + if isinstance(el.tag, str) and self.header_rgx.match(el.tag): + self.set_level(el) + text = get_name(el) + + # Do not override pre-existing ids + if "id" not in el.attrib: + 
innertext = unescape(stashedHTML2text(text, self.md)) + el.attrib["id"] = unique(self.slugify(innertext, self.sep), used_ids) + + if int(el.tag[-1]) >= self.toc_top and int(el.tag[-1]) <= self.toc_bottom: + toc_tokens.append({ + 'level': int(el.tag[-1]), + 'id': el.attrib["id"], + 'name': stashedHTML2text( + code_escape(el.attrib.get('data-toc-label', text)), + self.md, strip_entities=False + ) + }) + + # Remove the data-toc-label attribute as it is no longer needed + if 'data-toc-label' in el.attrib: + del el.attrib['data-toc-label'] + + if self.use_anchors: + self.add_anchor(el, el.attrib["id"]) + if self.use_permalinks not in [False, None]: + self.add_permalink(el, el.attrib["id"]) + + toc_tokens = nest_toc_tokens(toc_tokens) + div = self.build_toc_div(toc_tokens) + if self.marker: + self.replace_marker(doc, div) + + # serialize and attach to markdown instance. + toc = self.md.serializer(div) + for pp in self.md.postprocessors: + toc = pp.run(toc) + self.md.toc_tokens = toc_tokens + self.md.toc = toc + + +class TocExtension(Extension): + + TreeProcessorClass = TocTreeprocessor + + def __init__(self, **kwargs): + self.config = { + "marker": ['[TOC]', + 'Text to find and replace with Table of Contents - ' + 'Set to an empty string to disable. Defaults to "[TOC]"'], + "title": ["", + "Title to insert into TOC <div> - " + "Defaults to an empty string"], + "toc_class": ['toc', + 'CSS class(es) used for the link. ' + 'Defaults to "toclink"'], + "anchorlink": [False, + "True if header should be a self link - " + "Defaults to False"], + "anchorlink_class": ['toclink', + 'CSS class(es) used for the link. ' + 'Defaults to "toclink"'], + "permalink": [0, + "True or link text if a Sphinx-style permalink should " + "be added - Defaults to False"], + "permalink_class": ['headerlink', + 'CSS class(es) used for the link. 
' + 'Defaults to "headerlink"'], + "permalink_title": ["Permanent link", + "Title attribute of the permalink - " + "Defaults to 'Permanent link'"], + "baselevel": ['1', 'Base level for headers.'], + "slugify": [slugify, + "Function to generate anchors based on header text - " + "Defaults to the headerid ext's slugify function."], + 'separator': ['-', 'Word separator. Defaults to "-".'], + "toc_depth": [6, + 'Define the range of section levels to include in' + 'the Table of Contents. A single integer (b) defines' + 'the bottom section level (<h1>..<hb>) only.' + 'A string consisting of two digits separated by a hyphen' + 'in between ("2-5"), define the top (t) and the' + 'bottom (b) (<ht>..<hb>). Defaults to `6` (bottom).'], + } + + super().__init__(**kwargs) + + def extendMarkdown(self, md): + md.registerExtension(self) + self.md = md + self.reset() + tocext = self.TreeProcessorClass(md, self.getConfigs()) + # Headerid ext is set to '>prettify'. With this set to '_end', + # it should always come after headerid ext (and honor ids assigned + # by the header id extension) if both are used. Same goes for + # attr_list extension. This must come last because we don't want + # to redefine ids after toc is created. But we do want toc prettified. + md.treeprocessors.register(tocext, 'toc', 5) + + def reset(self): + self.md.toc = '' + self.md.toc_tokens = [] + + +def makeExtension(**kwargs): # pragma: no cover + return TocExtension(**kwargs) diff --git a/SSG/markdown/extensions/wikilinks.py b/SSG/markdown/extensions/wikilinks.py new file mode 100644 index 0000000..cddee7a --- /dev/null +++ b/SSG/markdown/extensions/wikilinks.py @@ -0,0 +1,87 @@ +''' +WikiLinks Extension for Python-Markdown +====================================== + +Converts [[WikiLinks]] to relative links. + +See <https://Python-Markdown.github.io/extensions/wikilinks> +for documentation. + +Original code Copyright [Waylan Limberg](http://achinghead.com/). 
+ +All changes Copyright The Python Markdown Project + +License: [BSD](https://opensource.org/licenses/bsd-license.php) + +''' + +from . import Extension +from ..inlinepatterns import InlineProcessor +import xml.etree.ElementTree as etree +import re + + +def build_url(label, base, end): + """ Build a url from the label, a base, and an end. """ + clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label) + return '{}{}{}'.format(base, clean_label, end) + + +class WikiLinkExtension(Extension): + + def __init__(self, **kwargs): + self.config = { + 'base_url': ['/', 'String to append to beginning or URL.'], + 'end_url': ['/', 'String to append to end of URL.'], + 'html_class': ['wikilink', 'CSS hook. Leave blank for none.'], + 'build_url': [build_url, 'Callable formats URL from label.'], + } + + super().__init__(**kwargs) + + def extendMarkdown(self, md): + self.md = md + + # append to end of inline patterns + WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]' + wikilinkPattern = WikiLinksInlineProcessor(WIKILINK_RE, self.getConfigs()) + wikilinkPattern.md = md + md.inlinePatterns.register(wikilinkPattern, 'wikilink', 75) + + +class WikiLinksInlineProcessor(InlineProcessor): + def __init__(self, pattern, config): + super().__init__(pattern) + self.config = config + + def handleMatch(self, m, data): + if m.group(1).strip(): + base_url, end_url, html_class = self._getMeta() + label = m.group(1).strip() + url = self.config['build_url'](label, base_url, end_url) + a = etree.Element('a') + a.text = label + a.set('href', url) + if html_class: + a.set('class', html_class) + else: + a = '' + return a, m.start(0), m.end(0) + + def _getMeta(self): + """ Return meta data or config data. 
""" + base_url = self.config['base_url'] + end_url = self.config['end_url'] + html_class = self.config['html_class'] + if hasattr(self.md, 'Meta'): + if 'wiki_base_url' in self.md.Meta: + base_url = self.md.Meta['wiki_base_url'][0] + if 'wiki_end_url' in self.md.Meta: + end_url = self.md.Meta['wiki_end_url'][0] + if 'wiki_html_class' in self.md.Meta: + html_class = self.md.Meta['wiki_html_class'][0] + return base_url, end_url, html_class + + +def makeExtension(**kwargs): # pragma: no cover + return WikiLinkExtension(**kwargs) diff --git a/SSG/markdown/htmlparser.py b/SSG/markdown/htmlparser.py new file mode 100644 index 0000000..3512d1a --- /dev/null +++ b/SSG/markdown/htmlparser.py @@ -0,0 +1,323 @@ +""" +Python Markdown + +A Python implementation of John Gruber's Markdown. + +Documentation: https://python-markdown.github.io/ +GitHub: https://github.com/Python-Markdown/markdown/ +PyPI: https://pypi.org/project/Markdown/ + +Started by Manfred Stienstra (http://www.dwerg.net/). +Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +Currently maintained by Waylan Limberg (https://github.com/waylan), +Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +Copyright 2007-2020 The Python Markdown Project (v. 1.7 and later) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +License: BSD (see LICENSE.md for details). +""" + +import re +import importlib.util +import sys + + +# Import a copy of the html.parser lib as `htmlparser` so we can monkeypatch it. +# Users can still do `from html import parser` and get the default behavior. +spec = importlib.util.find_spec('html.parser') +htmlparser = importlib.util.module_from_spec(spec) +spec.loader.exec_module(htmlparser) +sys.modules['htmlparser'] = htmlparser + +# Monkeypatch HTMLParser to only accept `?>` to close Processing Instructions. 
+htmlparser.piclose = re.compile(r'\?>') +# Monkeypatch HTMLParser to only recognize entity references with a closing semicolon. +htmlparser.entityref = re.compile(r'&([a-zA-Z][-.a-zA-Z0-9]*);') +# Monkeypatch HTMLParser to no longer support partial entities. We are always feeding a complete block, +# so the 'incomplete' functionality is unnecessary. As the entityref regex is run right before incomplete, +# and the two regex are the same, then incomplete will simply never match and we avoid the logic within. +htmlparser.incomplete = htmlparser.entityref +# Monkeypatch HTMLParser to not accept a backtick in a tag name, attribute name, or bare value. +htmlparser.locatestarttagend_tolerant = re.compile(r""" + <[a-zA-Z][^`\t\n\r\f />\x00]* # tag name <= added backtick here + (?:[\s/]* # optional whitespace before attribute name + (?:(?<=['"\s/])[^`\s/>][^\s/=>]* # attribute name <= added backtick here + (?:\s*=+\s* # value indicator + (?:'[^']*' # LITA-enclosed value + |"[^"]*" # LIT-enclosed value + |(?!['"])[^`>\s]* # bare value <= added backtick here + ) + (?:\s*,)* # possibly followed by a comma + )?(?:\s|/(?!>))* + )* + )? + \s* # trailing whitespace +""", re.VERBOSE) + +# Match a blank line at the start of a block of text (two newlines). +# The newlines may be preceded by additional whitespace. +blank_line_re = re.compile(r'^([ ]*\n){2}') + + +class HTMLExtractor(htmlparser.HTMLParser): + """ + Extract raw HTML from text. + + The raw HTML is stored in the `htmlStash` of the Markdown instance passed + to `md` and the remaining text is stored in `cleandoc` as a list of strings. + """ + + def __init__(self, md, *args, **kwargs): + if 'convert_charrefs' not in kwargs: + kwargs['convert_charrefs'] = False + + # Block tags that should contain no content (self closing) + self.empty_tags = set(['hr']) + + # This calls self.reset + super().__init__(*args, **kwargs) + self.md = md + + def reset(self): + """Reset this instance. 
Loses all unprocessed data.""" + self.inraw = False + self.intail = False + self.stack = [] # When inraw==True, stack contains a list of tags + self._cache = [] + self.cleandoc = [] + super().reset() + + def close(self): + """Handle any buffered data.""" + super().close() + if len(self.rawdata): + # Temp fix for https://bugs.python.org/issue41989 + # TODO: remove this when the bug is fixed in all supported Python versions. + if self.convert_charrefs and not self.cdata_elem: # pragma: no cover + self.handle_data(htmlparser.unescape(self.rawdata)) + else: + self.handle_data(self.rawdata) + # Handle any unclosed tags. + if len(self._cache): + self.cleandoc.append(self.md.htmlStash.store(''.join(self._cache))) + self._cache = [] + + @property + def line_offset(self): + """Returns char index in self.rawdata for the start of the current line. """ + if self.lineno > 1 and '\n' in self.rawdata: + m = re.match(r'([^\n]*\n){{{}}}'.format(self.lineno-1), self.rawdata) + if m: + return m.end() + else: # pragma: no cover + # Value of self.lineno must exceed total number of lines. + # Find index of beginning of last line. + return self.rawdata.rfind('\n') + return 0 + + def at_line_start(self): + """ + Returns True if current position is at start of line. + + Allows for up to three blank spaces at start of line. + """ + if self.offset == 0: + return True + if self.offset > 3: + return False + # Confirm up to first 3 chars are whitespace + return self.rawdata[self.line_offset:self.line_offset + self.offset].strip() == '' + + def get_endtag_text(self, tag): + """ + Returns the text of the end tag. + + If it fails to extract the actual text from the raw data, it builds a closing tag with `tag`. + """ + # Attempt to extract actual tag from raw source text + start = self.line_offset + self.offset + m = htmlparser.endendtag.search(self.rawdata, start) + if m: + return self.rawdata[start:m.end()] + else: # pragma: no cover + # Failed to extract from raw data. 
Assume well formed and lowercase. + return '</{}>'.format(tag) + + def handle_starttag(self, tag, attrs): + # Handle tags that should always be empty and do not specify a closing tag + if tag in self.empty_tags: + self.handle_startendtag(tag, attrs) + return + + if self.md.is_block_level(tag) and (self.intail or (self.at_line_start() and not self.inraw)): + # Started a new raw block. Prepare stack. + self.inraw = True + self.cleandoc.append('\n') + + text = self.get_starttag_text() + if self.inraw: + self.stack.append(tag) + self._cache.append(text) + else: + self.cleandoc.append(text) + if tag in self.CDATA_CONTENT_ELEMENTS: + # This is presumably a standalone tag in a code span (see #1036). + self.clear_cdata_mode() + + def handle_endtag(self, tag): + text = self.get_endtag_text(tag) + + if self.inraw: + self._cache.append(text) + if tag in self.stack: + # Remove tag from stack + while self.stack: + if self.stack.pop() == tag: + break + if len(self.stack) == 0: + # End of raw block. + if blank_line_re.match(self.rawdata[self.line_offset + self.offset + len(text):]): + # Preserve blank line and end of raw block. + self._cache.append('\n') + else: + # More content exists after endtag. + self.intail = True + # Reset stack. + self.inraw = False + self.cleandoc.append(self.md.htmlStash.store(''.join(self._cache))) + # Insert blank line between this and next line. + self.cleandoc.append('\n\n') + self._cache = [] + else: + self.cleandoc.append(text) + + def handle_data(self, data): + if self.intail and '\n' in data: + self.intail = False + if self.inraw: + self._cache.append(data) + else: + self.cleandoc.append(data) + + def handle_empty_tag(self, data, is_block): + """ Handle empty tags (`<data>`). 
""" + if self.inraw or self.intail: + # Append this to the existing raw block + self._cache.append(data) + elif self.at_line_start() and is_block: + # Handle this as a standalone raw block + if blank_line_re.match(self.rawdata[self.line_offset + self.offset + len(data):]): + # Preserve blank line after tag in raw block. + data += '\n' + else: + # More content exists after tag. + self.intail = True + item = self.cleandoc[-1] if self.cleandoc else '' + # If we only have one newline before block element, add another + if not item.endswith('\n\n') and item.endswith('\n'): + self.cleandoc.append('\n') + self.cleandoc.append(self.md.htmlStash.store(data)) + # Insert blank line between this and next line. + self.cleandoc.append('\n\n') + else: + self.cleandoc.append(data) + + def handle_startendtag(self, tag, attrs): + self.handle_empty_tag(self.get_starttag_text(), is_block=self.md.is_block_level(tag)) + + def handle_charref(self, name): + self.handle_empty_tag('&#{};'.format(name), is_block=False) + + def handle_entityref(self, name): + self.handle_empty_tag('&{};'.format(name), is_block=False) + + def handle_comment(self, data): + self.handle_empty_tag('<!--{}-->'.format(data), is_block=True) + + def handle_decl(self, data): + self.handle_empty_tag('<!{}>'.format(data), is_block=True) + + def handle_pi(self, data): + self.handle_empty_tag('<?{}?>'.format(data), is_block=True) + + def unknown_decl(self, data): + end = ']]>' if data.startswith('CDATA[') else ']>' + self.handle_empty_tag('<![{}{}'.format(data, end), is_block=True) + + def parse_pi(self, i): + if self.at_line_start() or self.intail: + return super().parse_pi(i) + # This is not the beginning of a raw block so treat as plain data + # and avoid consuming any tags which may follow (see #1066). 
+ self.handle_data('<?') + return i + 2 + + def parse_html_declaration(self, i): + if self.at_line_start() or self.intail: + return super().parse_html_declaration(i) + # This is not the beginning of a raw block so treat as plain data + # and avoid consuming any tags which may follow (see #1066). + self.handle_data('<!') + return i + 2 + + # The rest has been copied from base class in standard lib to address #1036. + # As __startag_text is private, all references to it must be in this subclass. + # The last few lines of parse_starttag are reversed so that handle_starttag + # can override cdata_mode in certain situations (in a code span). + __starttag_text = None + + def get_starttag_text(self): + """Return full source of start tag: '<...>'.""" + return self.__starttag_text + + def parse_starttag(self, i): # pragma: no cover + self.__starttag_text = None + endpos = self.check_for_whole_start_tag(i) + if endpos < 0: + return endpos + rawdata = self.rawdata + self.__starttag_text = rawdata[i:endpos] + + # Now parse the data between i+1 and j into a tag and attrs + attrs = [] + match = htmlparser.tagfind_tolerant.match(rawdata, i+1) + assert match, 'unexpected call to parse_starttag()' + k = match.end() + self.lasttag = tag = match.group(1).lower() + while k < endpos: + m = htmlparser.attrfind_tolerant.match(rawdata, k) + if not m: + break + attrname, rest, attrvalue = m.group(1, 2, 3) + if not rest: + attrvalue = None + elif attrvalue[:1] == '\'' == attrvalue[-1:] or \ + attrvalue[:1] == '"' == attrvalue[-1:]: # noqa: E127 + attrvalue = attrvalue[1:-1] + if attrvalue: + attrvalue = htmlparser.unescape(attrvalue) + attrs.append((attrname.lower(), attrvalue)) + k = m.end() + + end = rawdata[k:endpos].strip() + if end not in (">", "/>"): + lineno, offset = self.getpos() + if "\n" in self.__starttag_text: + lineno = lineno + self.__starttag_text.count("\n") + offset = len(self.__starttag_text) \ + - self.__starttag_text.rfind("\n") # noqa: E127 + else: + offset = offset + 
len(self.__starttag_text) + self.handle_data(rawdata[i:endpos]) + return endpos + if end.endswith('/>'): + # XHTML-style empty tag: <span attr="value" /> + self.handle_startendtag(tag, attrs) + else: + # *** set cdata_mode first so we can override it in handle_starttag (see #1036) *** + if tag in self.CDATA_CONTENT_ELEMENTS: + self.set_cdata_mode(tag) + self.handle_starttag(tag, attrs) + return endpos diff --git a/SSG/markdown/inlinepatterns.py b/SSG/markdown/inlinepatterns.py new file mode 100644 index 0000000..eb313bd --- /dev/null +++ b/SSG/markdown/inlinepatterns.py @@ -0,0 +1,886 @@ +""" +Python Markdown + +A Python implementation of John Gruber's Markdown. + +Documentation: https://python-markdown.github.io/ +GitHub: https://github.com/Python-Markdown/markdown/ +PyPI: https://pypi.org/project/Markdown/ + +Started by Manfred Stienstra (http://www.dwerg.net/). +Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +Currently maintained by Waylan Limberg (https://github.com/waylan), +Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +License: BSD (see LICENSE.md for details). + +INLINE PATTERNS +============================================================================= + +Inline patterns such as *emphasis* are handled by means of auxiliary +objects, one per pattern. Pattern objects must be instances of classes +that extend markdown.Pattern. 
Each pattern object uses a single regular +expression and needs support the following methods: + + pattern.getCompiledRegExp() # returns a regular expression + + pattern.handleMatch(m) # takes a match object and returns + # an ElementTree element or just plain text + +All of python markdown's built-in patterns subclass from Pattern, +but you can add additional patterns that don't. + +Also note that all the regular expressions used by inline must +capture the whole block. For this reason, they all start with +'^(.*)' and end with '(.*)!'. In case with built-in expression +Pattern takes care of adding the "^(.*)" and "(.*)!". + +Finally, the order in which regular expressions are applied is very +important - e.g. if we first replace http://.../ links with <a> tags +and _then_ try to replace inline html, we would end up with a mess. +So, we apply the expressions in the following order: + +* escape and backticks have to go before everything else, so + that we can preempt any markdown patterns by escaping them. + +* then we handle auto-links (must be done before inline html) + +* then we handle inline HTML. At this point we will simply + replace all inline HTML strings with a placeholder and add + the actual HTML to a hash. + +* then inline images (must be done before links) + +* then bracketed links, first regular then reference-style + +* finally we apply strong and emphasis +""" + +from . import util +from collections import namedtuple +import re +import xml.etree.ElementTree as etree +try: # pragma: no cover + from html import entities +except ImportError: # pragma: no cover + import htmlentitydefs as entities + + +def build_inlinepatterns(md, **kwargs): + """ Build the default set of inline patterns for Markdown. 
""" + inlinePatterns = util.Registry() + inlinePatterns.register(BacktickInlineProcessor(BACKTICK_RE), 'backtick', 190) + inlinePatterns.register(EscapeInlineProcessor(ESCAPE_RE, md), 'escape', 180) + inlinePatterns.register(ReferenceInlineProcessor(REFERENCE_RE, md), 'reference', 170) + inlinePatterns.register(LinkInlineProcessor(LINK_RE, md), 'link', 160) + inlinePatterns.register(ImageInlineProcessor(IMAGE_LINK_RE, md), 'image_link', 150) + inlinePatterns.register( + ImageReferenceInlineProcessor(IMAGE_REFERENCE_RE, md), 'image_reference', 140 + ) + inlinePatterns.register( + ShortReferenceInlineProcessor(REFERENCE_RE, md), 'short_reference', 130 + ) + inlinePatterns.register( + ShortImageReferenceInlineProcessor(IMAGE_REFERENCE_RE, md), 'short_image_ref', 125 + ) + inlinePatterns.register(AutolinkInlineProcessor(AUTOLINK_RE, md), 'autolink', 120) + inlinePatterns.register(AutomailInlineProcessor(AUTOMAIL_RE, md), 'automail', 110) + inlinePatterns.register(SubstituteTagInlineProcessor(LINE_BREAK_RE, 'br'), 'linebreak', 100) + inlinePatterns.register(HtmlInlineProcessor(HTML_RE, md), 'html', 90) + inlinePatterns.register(HtmlInlineProcessor(ENTITY_RE, md), 'entity', 80) + inlinePatterns.register(SimpleTextInlineProcessor(NOT_STRONG_RE), 'not_strong', 70) + inlinePatterns.register(AsteriskProcessor(r'\*'), 'em_strong', 60) + inlinePatterns.register(UnderscoreProcessor(r'_'), 'em_strong2', 50) + return inlinePatterns + + +""" +The actual regular expressions for patterns +----------------------------------------------------------------------------- +""" + +NOIMG = r'(?<!\!)' + +# `e=f()` or ``e=f("`")`` +BACKTICK_RE = r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\2(?!`))' + +# \< +ESCAPE_RE = r'\\(.)' + +# *emphasis* +EMPHASIS_RE = r'(\*)([^\*]+)\1' + +# **strong** +STRONG_RE = r'(\*{2})(.+?)\1' + +# __smart__strong__ +SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\1(?!\w)' + +# _smart_emphasis_ +SMART_EMPHASIS_RE = r'(?<!\w)(_)(?!_)(.+?)(?<!_)\1(?!\w)' + 
+# __strong _em__ +SMART_STRONG_EM_RE = r'(?<!\w)(\_)\1(?!\1)(.+?)(?<!\w)\1(?!\1)(.+?)\1{3}(?!\w)' + +# ***strongem*** or ***em*strong** +EM_STRONG_RE = r'(\*)\1{2}(.+?)\1(.*?)\1{2}' + +# ___strongem___ or ___em_strong__ +EM_STRONG2_RE = r'(_)\1{2}(.+?)\1(.*?)\1{2}' + +# ***strong**em* +STRONG_EM_RE = r'(\*)\1{2}(.+?)\1{2}(.*?)\1' + +# ___strong__em_ +STRONG_EM2_RE = r'(_)\1{2}(.+?)\1{2}(.*?)\1' + +# **strong*em*** +STRONG_EM3_RE = r'(\*)\1(?!\1)([^*]+?)\1(?!\1)(.+?)\1{3}' + +# [text](url) or [text](<url>) or [text](url "title") +LINK_RE = NOIMG + r'\[' + +# ![alttxt](http://x.com/) or ![alttxt](<http://x.com/>) +IMAGE_LINK_RE = r'\!\[' + +# [Google][3] +REFERENCE_RE = LINK_RE + +# ![alt text][2] +IMAGE_REFERENCE_RE = IMAGE_LINK_RE + +# stand-alone * or _ +NOT_STRONG_RE = r'((^|\s)(\*|_)(\s|$))' + +# <http://www.123.com> +AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^<>]*)>' + +# <me@example.com> +AUTOMAIL_RE = r'<([^<> !]+@[^@<> ]+)>' + +# <...> +HTML_RE = r'(<(\/?[a-zA-Z][^<>@ ]*( [^<>]*)?|!--(?:(?!<!--|-->).)*--)>)' + +# "&" (decimal) or "&" (hex) or "&" (named) +ENTITY_RE = r'(&(?:\#[0-9]+|\#x[0-9a-fA-F]+|[a-zA-Z0-9]+);)' + +# two spaces at end of line +LINE_BREAK_RE = r' \n' + + +def dequote(string): + """Remove quotes from around a string.""" + if ((string.startswith('"') and string.endswith('"')) or + (string.startswith("'") and string.endswith("'"))): + return string[1:-1] + else: + return string + + +class EmStrongItem(namedtuple('EmStrongItem', ['pattern', 'builder', 'tags'])): + """Emphasis/strong pattern item.""" + + +""" +The pattern classes +----------------------------------------------------------------------------- +""" + + +class Pattern: # pragma: no cover + """Base class that inline patterns subclass. """ + + ANCESTOR_EXCLUDES = tuple() + + def __init__(self, pattern, md=None): + """ + Create an instant of an inline pattern. 
+ + Keyword arguments: + + * pattern: A regular expression that matches a pattern + + """ + self.pattern = pattern + self.compiled_re = re.compile(r"^(.*?)%s(.*)$" % pattern, + re.DOTALL | re.UNICODE) + + self.md = md + + def getCompiledRegExp(self): + """ Return a compiled regular expression. """ + return self.compiled_re + + def handleMatch(self, m): + """Return a ElementTree element from the given match. + + Subclasses should override this method. + + Keyword arguments: + + * m: A re match object containing a match of the pattern. + + """ + pass # pragma: no cover + + def type(self): + """ Return class name, to define pattern type """ + return self.__class__.__name__ + + def unescape(self, text): + """ Return unescaped text given text with an inline placeholder. """ + try: + stash = self.md.treeprocessors['inline'].stashed_nodes + except KeyError: # pragma: no cover + return text + + def get_stash(m): + id = m.group(1) + if id in stash: + value = stash.get(id) + if isinstance(value, str): + return value + else: + # An etree Element - return text content only + return ''.join(value.itertext()) + return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text) + + +class InlineProcessor(Pattern): + """ + Base class that inline patterns subclass. + + This is the newer style inline processor that uses a more + efficient and flexible search approach. + """ + + def __init__(self, pattern, md=None): + """ + Create an instant of an inline pattern. + + Keyword arguments: + + * pattern: A regular expression that matches a pattern + + """ + self.pattern = pattern + self.compiled_re = re.compile(pattern, re.DOTALL | re.UNICODE) + + # Api for Markdown to pass safe_mode into instance + self.safe_mode = False + self.md = md + + def handleMatch(self, m, data): + """Return a ElementTree element from the given match and the + start and end index of the matched text. 
+ + If `start` and/or `end` are returned as `None`, it will be + assumed that the processor did not find a valid region of text. + + Subclasses should override this method. + + Keyword arguments: + + * m: A re match object containing a match of the pattern. + * data: The buffer current under analysis + + Returns: + + * el: The ElementTree element, text or None. + * start: The start of the region that has been matched or None. + * end: The end of the region that has been matched or None. + + """ + pass # pragma: no cover + + +class SimpleTextPattern(Pattern): # pragma: no cover + """ Return a simple text of group(2) of a Pattern. """ + def handleMatch(self, m): + return m.group(2) + + +class SimpleTextInlineProcessor(InlineProcessor): + """ Return a simple text of group(1) of a Pattern. """ + def handleMatch(self, m, data): + return m.group(1), m.start(0), m.end(0) + + +class EscapeInlineProcessor(InlineProcessor): + """ Return an escaped character. """ + + def handleMatch(self, m, data): + char = m.group(1) + if char in self.md.ESCAPED_CHARS: + return '{}{}{}'.format(util.STX, ord(char), util.ETX), m.start(0), m.end(0) + else: + return None, m.start(0), m.end(0) + + +class SimpleTagPattern(Pattern): # pragma: no cover + """ + Return element of type `tag` with a text attribute of group(3) + of a Pattern. + + """ + def __init__(self, pattern, tag): + Pattern.__init__(self, pattern) + self.tag = tag + + def handleMatch(self, m): + el = etree.Element(self.tag) + el.text = m.group(3) + return el + + +class SimpleTagInlineProcessor(InlineProcessor): + """ + Return element of type `tag` with a text attribute of group(2) + of a Pattern. 
+ + """ + def __init__(self, pattern, tag): + InlineProcessor.__init__(self, pattern) + self.tag = tag + + def handleMatch(self, m, data): # pragma: no cover + el = etree.Element(self.tag) + el.text = m.group(2) + return el, m.start(0), m.end(0) + + +class SubstituteTagPattern(SimpleTagPattern): # pragma: no cover + """ Return an element of type `tag` with no children. """ + def handleMatch(self, m): + return etree.Element(self.tag) + + +class SubstituteTagInlineProcessor(SimpleTagInlineProcessor): + """ Return an element of type `tag` with no children. """ + def handleMatch(self, m, data): + return etree.Element(self.tag), m.start(0), m.end(0) + + +class BacktickInlineProcessor(InlineProcessor): + """ Return a `<code>` element containing the matching text. """ + def __init__(self, pattern): + InlineProcessor.__init__(self, pattern) + self.ESCAPED_BSLASH = '{}{}{}'.format(util.STX, ord('\\'), util.ETX) + self.tag = 'code' + + def handleMatch(self, m, data): + if m.group(3): + el = etree.Element(self.tag) + el.text = util.AtomicString(util.code_escape(m.group(3).strip())) + return el, m.start(0), m.end(0) + else: + return m.group(1).replace('\\\\', self.ESCAPED_BSLASH), m.start(0), m.end(0) + + +class DoubleTagPattern(SimpleTagPattern): # pragma: no cover + """Return a ElementTree element nested in tag2 nested in tag1. + + Useful for strong emphasis etc. + + """ + def handleMatch(self, m): + tag1, tag2 = self.tag.split(",") + el1 = etree.Element(tag1) + el2 = etree.SubElement(el1, tag2) + el2.text = m.group(3) + if len(m.groups()) == 5: + el2.tail = m.group(4) + return el1 + + +class DoubleTagInlineProcessor(SimpleTagInlineProcessor): + """Return a ElementTree element nested in tag2 nested in tag1. + + Useful for strong emphasis etc. 
+ + """ + def handleMatch(self, m, data): # pragma: no cover + tag1, tag2 = self.tag.split(",") + el1 = etree.Element(tag1) + el2 = etree.SubElement(el1, tag2) + el2.text = m.group(2) + if len(m.groups()) == 3: + el2.tail = m.group(3) + return el1, m.start(0), m.end(0) + + +class HtmlInlineProcessor(InlineProcessor): + """ Store raw inline html and return a placeholder. """ + def handleMatch(self, m, data): + rawhtml = self.unescape(m.group(1)) + place_holder = self.md.htmlStash.store(rawhtml) + return place_holder, m.start(0), m.end(0) + + def unescape(self, text): + """ Return unescaped text given text with an inline placeholder. """ + try: + stash = self.md.treeprocessors['inline'].stashed_nodes + except KeyError: # pragma: no cover + return text + + def get_stash(m): + id = m.group(1) + value = stash.get(id) + if value is not None: + try: + return self.md.serializer(value) + except Exception: + return r'\%s' % value + + return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text) + + +class AsteriskProcessor(InlineProcessor): + """Emphasis processor for handling strong and em matches inside asterisks.""" + + PATTERNS = [ + EmStrongItem(re.compile(EM_STRONG_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'), + EmStrongItem(re.compile(STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'), + EmStrongItem(re.compile(STRONG_EM3_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'), + EmStrongItem(re.compile(STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'), + EmStrongItem(re.compile(EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em') + ] + + def build_single(self, m, tag, idx): + """Return single tag.""" + el1 = etree.Element(tag) + text = m.group(2) + self.parse_sub_patterns(text, el1, None, idx) + return el1 + + def build_double(self, m, tags, idx): + """Return double tag.""" + + tag1, tag2 = tags.split(",") + el1 = etree.Element(tag1) + el2 = etree.Element(tag2) + text = m.group(2) + self.parse_sub_patterns(text, el2, None, idx) + el1.append(el2) + if 
len(m.groups()) == 3: + text = m.group(3) + self.parse_sub_patterns(text, el1, el2, idx) + return el1 + + def build_double2(self, m, tags, idx): + """Return double tags (variant 2): `<strong>text <em>text</em></strong>`.""" + + tag1, tag2 = tags.split(",") + el1 = etree.Element(tag1) + el2 = etree.Element(tag2) + text = m.group(2) + self.parse_sub_patterns(text, el1, None, idx) + text = m.group(3) + el1.append(el2) + self.parse_sub_patterns(text, el2, None, idx) + return el1 + + def parse_sub_patterns(self, data, parent, last, idx): + """ + Parses sub patterns. + + `data` (`str`): + text to evaluate. + + `parent` (`etree.Element`): + Parent to attach text and sub elements to. + + `last` (`etree.Element`): + Last appended child to parent. Can also be None if parent has no children. + + `idx` (`int`): + Current pattern index that was used to evaluate the parent. + + """ + + offset = 0 + pos = 0 + + length = len(data) + while pos < length: + # Find the start of potential emphasis or strong tokens + if self.compiled_re.match(data, pos): + matched = False + # See if the we can match an emphasis/strong pattern + for index, item in enumerate(self.PATTERNS): + # Only evaluate patterns that are after what was used on the parent + if index <= idx: + continue + m = item.pattern.match(data, pos) + if m: + # Append child nodes to parent + # Text nodes should be appended to the last + # child if present, and if not, it should + # be added as the parent's text node. + text = data[offset:m.start(0)] + if text: + if last is not None: + last.tail = text + else: + parent.text = text + el = self.build_element(m, item.builder, item.tags, index) + parent.append(el) + last = el + # Move our position past the matched hunk + offset = pos = m.end(0) + matched = True + if not matched: + # We matched nothing, move on to the next character + pos += 1 + else: + # Increment position as no potential emphasis start was found. + pos += 1 + + # Append any leftover text as a text node. 
+ text = data[offset:] + if text: + if last is not None: + last.tail = text + else: + parent.text = text + + def build_element(self, m, builder, tags, index): + """Element builder.""" + + if builder == 'double2': + return self.build_double2(m, tags, index) + elif builder == 'double': + return self.build_double(m, tags, index) + else: + return self.build_single(m, tags, index) + + def handleMatch(self, m, data): + """Parse patterns.""" + + el = None + start = None + end = None + + for index, item in enumerate(self.PATTERNS): + m1 = item.pattern.match(data, m.start(0)) + if m1: + start = m1.start(0) + end = m1.end(0) + el = self.build_element(m1, item.builder, item.tags, index) + break + return el, start, end + + +class UnderscoreProcessor(AsteriskProcessor): + """Emphasis processor for handling strong and em matches inside underscores.""" + + PATTERNS = [ + EmStrongItem(re.compile(EM_STRONG2_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'), + EmStrongItem(re.compile(STRONG_EM2_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'), + EmStrongItem(re.compile(SMART_STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'), + EmStrongItem(re.compile(SMART_STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'), + EmStrongItem(re.compile(SMART_EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em') + ] + + +class LinkInlineProcessor(InlineProcessor): + """ Return a link element from the given match. 
""" + RE_LINK = re.compile(r'''\(\s*(?:(<[^<>]*>)\s*(?:('[^']*'|"[^"]*")\s*)?\))?''', re.DOTALL | re.UNICODE) + RE_TITLE_CLEAN = re.compile(r'\s') + + def handleMatch(self, m, data): + text, index, handled = self.getText(data, m.end(0)) + + if not handled: + return None, None, None + + href, title, index, handled = self.getLink(data, index) + if not handled: + return None, None, None + + el = etree.Element("a") + el.text = text + + el.set("href", href) + + if title is not None: + el.set("title", title) + + return el, m.start(0), index + + def getLink(self, data, index): + """Parse data between `()` of `[Text]()` allowing recursive `()`. """ + + href = '' + title = None + handled = False + + m = self.RE_LINK.match(data, pos=index) + if m and m.group(1): + # Matches [Text](<link> "title") + href = m.group(1)[1:-1].strip() + if m.group(2): + title = m.group(2)[1:-1] + index = m.end(0) + handled = True + elif m: + # Track bracket nesting and index in string + bracket_count = 1 + backtrack_count = 1 + start_index = m.end() + index = start_index + last_bracket = -1 + + # Primary (first found) quote tracking. + quote = None + start_quote = -1 + exit_quote = -1 + ignore_matches = False + + # Secondary (second found) quote tracking. + alt_quote = None + start_alt_quote = -1 + exit_alt_quote = -1 + + # Track last character + last = '' + + for pos in range(index, len(data)): + c = data[pos] + if c == '(': + # Count nested ( + # Don't increment the bracket count if we are sure we're in a title. + if not ignore_matches: + bracket_count += 1 + elif backtrack_count > 0: + backtrack_count -= 1 + elif c == ')': + # Match nested ) to ( + # Don't decrement if we are sure we are in a title that is unclosed. + if ((exit_quote != -1 and quote == last) or (exit_alt_quote != -1 and alt_quote == last)): + bracket_count = 0 + elif not ignore_matches: + bracket_count -= 1 + elif backtrack_count > 0: + backtrack_count -= 1 + # We've found our backup end location if the title doesn't resolve. 
+ if backtrack_count == 0: + last_bracket = index + 1 + + elif c in ("'", '"'): + # Quote has started + if not quote: + # We'll assume we are now in a title. + # Brackets are quoted, so no need to match them (except for the final one). + ignore_matches = True + backtrack_count = bracket_count + bracket_count = 1 + start_quote = index + 1 + quote = c + # Secondary quote (in case the first doesn't resolve): [text](link'"title") + elif c != quote and not alt_quote: + start_alt_quote = index + 1 + alt_quote = c + # Update primary quote match + elif c == quote: + exit_quote = index + 1 + # Update secondary quote match + elif alt_quote and c == alt_quote: + exit_alt_quote = index + 1 + + index += 1 + + # Link is closed, so let's break out of the loop + if bracket_count == 0: + # Get the title if we closed a title string right before link closed + if exit_quote >= 0 and quote == last: + href = data[start_index:start_quote - 1] + title = ''.join(data[start_quote:exit_quote - 1]) + elif exit_alt_quote >= 0 and alt_quote == last: + href = data[start_index:start_alt_quote - 1] + title = ''.join(data[start_alt_quote:exit_alt_quote - 1]) + else: + href = data[start_index:index - 1] + break + + if c != ' ': + last = c + + # We have a scenario: [test](link"notitle) + # When we enter a string, we stop tracking bracket resolution in the main counter, + # but we do keep a backup counter up until we discover where we might resolve all brackets + # if the title string fails to resolve. + if bracket_count != 0 and backtrack_count == 0: + href = data[start_index:last_bracket - 1] + index = last_bracket + bracket_count = 0 + + handled = bracket_count == 0 + + if title is not None: + title = self.RE_TITLE_CLEAN.sub(' ', dequote(self.unescape(title.strip()))) + + href = self.unescape(href).strip() + + return href, title, index, handled + + def getText(self, data, index): + """Parse the content between `[]` of the start of an image or link + resolving nested square brackets. 
+ + """ + bracket_count = 1 + text = [] + for pos in range(index, len(data)): + c = data[pos] + if c == ']': + bracket_count -= 1 + elif c == '[': + bracket_count += 1 + index += 1 + if bracket_count == 0: + break + text.append(c) + return ''.join(text), index, bracket_count == 0 + + +class ImageInlineProcessor(LinkInlineProcessor): + """ Return a img element from the given match. """ + + def handleMatch(self, m, data): + text, index, handled = self.getText(data, m.end(0)) + if not handled: + return None, None, None + + src, title, index, handled = self.getLink(data, index) + if not handled: + return None, None, None + + el = etree.Element("img") + + el.set("src", src) + + if title is not None: + el.set("title", title) + + el.set('alt', self.unescape(text)) + return el, m.start(0), index + + +class ReferenceInlineProcessor(LinkInlineProcessor): + """ Match to a stored reference and return link element. """ + NEWLINE_CLEANUP_RE = re.compile(r'\s+', re.MULTILINE) + + RE_LINK = re.compile(r'\s?\[([^\]]*)\]', re.DOTALL | re.UNICODE) + + def handleMatch(self, m, data): + text, index, handled = self.getText(data, m.end(0)) + if not handled: + return None, None, None + + id, end, handled = self.evalId(data, index, text) + if not handled: + return None, None, None + + # Clean up linebreaks in id + id = self.NEWLINE_CLEANUP_RE.sub(' ', id) + if id not in self.md.references: # ignore undefined refs + return None, m.start(0), end + + href, title = self.md.references[id] + + return self.makeTag(href, title, text), m.start(0), end + + def evalId(self, data, index, text): + """ + Evaluate the id portion of [ref][id]. + + If [ref][] use [ref]. 
+ """ + m = self.RE_LINK.match(data, pos=index) + if not m: + return None, index, False + else: + id = m.group(1).lower() + end = m.end(0) + if not id: + id = text.lower() + return id, end, True + + def makeTag(self, href, title, text): + el = etree.Element('a') + + el.set('href', href) + if title: + el.set('title', title) + + el.text = text + return el + + +class ShortReferenceInlineProcessor(ReferenceInlineProcessor): + """Short form of reference: [google]. """ + def evalId(self, data, index, text): + """Evaluate the id from of [ref] """ + + return text.lower(), index, True + + +class ImageReferenceInlineProcessor(ReferenceInlineProcessor): + """ Match to a stored reference and return img element. """ + def makeTag(self, href, title, text): + el = etree.Element("img") + el.set("src", href) + if title: + el.set("title", title) + el.set("alt", self.unescape(text)) + return el + + +class ShortImageReferenceInlineProcessor(ImageReferenceInlineProcessor): + """ Short form of inage reference: ![ref]. """ + def evalId(self, data, index, text): + """Evaluate the id from of [ref] """ + + return text.lower(), index, True + + +class AutolinkInlineProcessor(InlineProcessor): + """ Return a link Element given an autolink (`<http://example/com>`). """ + def handleMatch(self, m, data): + el = etree.Element("a") + el.set('href', self.unescape(m.group(1))) + el.text = util.AtomicString(m.group(1)) + return el, m.start(0), m.end(0) + + +class AutomailInlineProcessor(InlineProcessor): + """ + Return a mailto link Element given an automail link (`<foo@example.com>`). 
+ """ + def handleMatch(self, m, data): + el = etree.Element('a') + email = self.unescape(m.group(1)) + if email.startswith("mailto:"): + email = email[len("mailto:"):] + + def codepoint2name(code): + """Return entity definition by code, or the code if not defined.""" + entity = entities.codepoint2name.get(code) + if entity: + return "{}{};".format(util.AMP_SUBSTITUTE, entity) + else: + return "%s#%d;" % (util.AMP_SUBSTITUTE, code) + + letters = [codepoint2name(ord(letter)) for letter in email] + el.text = util.AtomicString(''.join(letters)) + + mailto = "mailto:" + email + mailto = "".join([util.AMP_SUBSTITUTE + '#%d;' % + ord(letter) for letter in mailto]) + el.set('href', mailto) + return el, m.start(0), m.end(0) diff --git a/SSG/markdown/postprocessors.py b/SSG/markdown/postprocessors.py new file mode 100644 index 0000000..498f7e8 --- /dev/null +++ b/SSG/markdown/postprocessors.py @@ -0,0 +1,137 @@ +""" +Python Markdown + +A Python implementation of John Gruber's Markdown. + +Documentation: https://python-markdown.github.io/ +GitHub: https://github.com/Python-Markdown/markdown/ +PyPI: https://pypi.org/project/Markdown/ + +Started by Manfred Stienstra (http://www.dwerg.net/). +Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +Currently maintained by Waylan Limberg (https://github.com/waylan), +Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +License: BSD (see LICENSE.md for details). + +POST-PROCESSORS +============================================================================= + +Markdown also allows post-processors, which are similar to preprocessors in +that they need to implement a "run" method. However, they are run after core +processing. + +""" + +from collections import OrderedDict +from . 
import util +import re + + +def build_postprocessors(md, **kwargs): + """ Build the default postprocessors for Markdown. """ + postprocessors = util.Registry() + postprocessors.register(RawHtmlPostprocessor(md), 'raw_html', 30) + postprocessors.register(AndSubstitutePostprocessor(), 'amp_substitute', 20) + return postprocessors + + +class Postprocessor(util.Processor): + """ + Postprocessors are run after the ElementTree it converted back into text. + + Each Postprocessor implements a "run" method that takes a pointer to a + text string, modifies it as necessary and returns a text string. + + Postprocessors must extend markdown.Postprocessor. + + """ + + def run(self, text): + """ + Subclasses of Postprocessor should implement a `run` method, which + takes the html document as a single text string and returns a + (possibly modified) string. + + """ + pass # pragma: no cover + + +class RawHtmlPostprocessor(Postprocessor): + """ Restore raw html to the document. """ + + BLOCK_LEVEL_REGEX = re.compile(r'^\<\/?([^ >]+)') + + def run(self, text): + """ Iterate over html stash and restore html. 
""" + replacements = OrderedDict() + for i in range(self.md.htmlStash.html_counter): + html = self.stash_to_string(self.md.htmlStash.rawHtmlBlocks[i]) + if self.isblocklevel(html): + replacements["<p>{}</p>".format( + self.md.htmlStash.get_placeholder(i))] = html + replacements[self.md.htmlStash.get_placeholder(i)] = html + + def substitute_match(m): + key = m.group(0) + + if key not in replacements: + if key[3:-4] in replacements: + return f'<p>{ replacements[key[3:-4]] }</p>' + else: + return key + + return replacements[key] + + if replacements: + base_placeholder = util.HTML_PLACEHOLDER % r'([0-9]+)' + pattern = re.compile(f'<p>{ base_placeholder }</p>|{ base_placeholder }') + processed_text = pattern.sub(substitute_match, text) + else: + return text + + if processed_text == text: + return processed_text + else: + return self.run(processed_text) + + def isblocklevel(self, html): + m = self.BLOCK_LEVEL_REGEX.match(html) + if m: + if m.group(1)[0] in ('!', '?', '@', '%'): + # Comment, php etc... + return True + return self.md.is_block_level(m.group(1)) + return False + + def stash_to_string(self, text): + """ Convert a stashed object to a string. """ + return str(text) + + +class AndSubstitutePostprocessor(Postprocessor): + """ Restore valid entities """ + + def run(self, text): + text = text.replace(util.AMP_SUBSTITUTE, "&") + return text + + +@util.deprecated( + "This class will be removed in the future; " + "use 'treeprocessors.UnescapeTreeprocessor' instead." 
@util.deprecated(
    "This class will be removed in the future; "
    "use 'treeprocessors.UnescapeTreeprocessor' instead."
)
class UnescapePostprocessor(Postprocessor):
    """ Restore escaped chars """

    # Escaped characters are stashed as STX<codepoint>ETX during parsing.
    RE = re.compile(r'{}(\d+){}'.format(util.STX, util.ETX))

    def unescape(self, m):
        return chr(int(m.group(1)))

    def run(self, text):
        return self.RE.sub(self.unescape, text)


"""
Python Markdown

A Python implementation of John Gruber's Markdown.

Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/

Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).

Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)

License: BSD (see LICENSE.md for details).

PRE-PROCESSORS
=============================================================================

Preprocessors work on source text before we start doing anything too
complicated.
"""

from . import util
from .htmlparser import HTMLExtractor
import re


def build_preprocessors(md, **kwargs):
    """ Build the default set of preprocessors used by Markdown. """
    preprocessors = util.Registry()
    preprocessors.register(NormalizeWhitespace(md), 'normalize_whitespace', 30)
    preprocessors.register(HtmlBlockPreprocessor(md), 'html_block', 20)
    return preprocessors


class Preprocessor(util.Processor):
    """
    Preprocessors are run after the text is broken into lines.

    Each preprocessor implements a "run" method that takes a pointer to a
    list of lines of the document, modifies it as necessary and returns
    either the same pointer or a pointer to a new list.

    Preprocessors must extend markdown.Preprocessor.

    """
    def run(self, lines):
        """
        Each subclass of Preprocessor should override the `run` method, which
        takes the document as a list of strings split by newlines and returns
        the (possibly modified) list of lines.

        """
        pass  # pragma: no cover


class NormalizeWhitespace(Preprocessor):
    """ Normalize whitespace for consistent parsing. """

    def run(self, lines):
        source = '\n'.join(lines)
        # Strip the control chars the parser reserves for placeholders.
        source = source.replace(util.STX, "").replace(util.ETX, "")
        # Normalize line endings and guarantee a trailing blank line.
        source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
        source = source.expandtabs(self.md.tab_length)
        # Blank out lines consisting only of spaces.
        source = re.sub(r'(?<=\n) +\n', '\n', source)
        return source.split('\n')


class HtmlBlockPreprocessor(Preprocessor):
    """Remove html blocks from the text and store them for later retrieval."""

    def run(self, lines):
        source = '\n'.join(lines)
        parser = HTMLExtractor(self.md)
        parser.feed(source)
        parser.close()
        return ''.join(parser.cleandoc).split('\n')


# markdown/serializers.py
#
# Add x/html serialization to ElementTree
# Taken from ElementTree 1.3 preview with slight modifications
#
# Copyright (c) 1999-2007 by Fredrik Lundh.  All rights reserved.
+# +# fredrik@pythonware.com +# https://www.pythonware.com/ +# +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2007 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
# --------------------------------------------------------------------


from xml.etree.ElementTree import ProcessingInstruction
from xml.etree.ElementTree import Comment, ElementTree, QName
import re

__all__ = ['to_html_string', 'to_xhtml_string']

# "Void" HTML elements: they never take a closing tag.
HTML_EMPTY = {"area", "base", "basefont", "br", "col", "frame", "hr",
              "img", "input", "isindex", "link", "meta", "param"}

# Matches a bare '&' that is NOT already the start of a character entity
# (named, decimal or hex), so existing entities are not double-escaped.
RE_AMP = re.compile(r'&(?!(?:\#[0-9]+|\#x[0-9a-f]+|[0-9a-z]+);)', re.I)


def _raise_serialization_error(text):  # pragma: no cover
    raise TypeError(
        "cannot serialize {!r} (type {})".format(text, type(text).__name__)
    )


def _escape_cdata(text):
    """Escape '&', '<' and '>' in character data.

    NOTE: the copy this replaces had its entity references decoded during
    extraction (e.g. ``text.replace("<", "<")``), turning every escape
    into a no-op; the proper entities are restored here.
    """
    try:
        if "&" in text:
            # Only replace & when not part of an entity
            text = RE_AMP.sub('&amp;', text)
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        return text
    except (TypeError, AttributeError):  # pragma: no cover
        _raise_serialization_error(text)


def _escape_attrib(text):
    """Escape an XML attribute value: '&', '<', '>', '"' and newlines."""
    try:
        if "&" in text:
            # Only replace & when not part of an entity
            text = RE_AMP.sub('&amp;', text)
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        if "\"" in text:
            text = text.replace("\"", "&quot;")
        if "\n" in text:
            # Literal newlines in attributes are replaced with a char ref so
            # they survive whitespace normalization by consumers.
            text = text.replace("\n", "&#10;")
        return text
    except (TypeError, AttributeError):  # pragma: no cover
        _raise_serialization_error(text)


def _escape_attrib_html(text):
    """Escape an HTML attribute value ('&', '<', '>', '"'; newlines kept)."""
    try:
        if "&" in text:
            # Only replace & when not part of an entity
            text = RE_AMP.sub('&amp;', text)
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        if "\"" in text:
            text = text.replace("\"", "&quot;")
        return text
    except (TypeError, AttributeError):  # pragma: no cover
        _raise_serialization_error(text)


def _serialize_html(write, elem, format):
    """Recursively serialize `elem`, emitting chunks through `write`.

    `format` is either "html" (void tags bare, boolean attrs collapsed)
    or "xhtml" (self-closing void tags).
    """
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text))
    elif tag is None:
        # Tagless wrapper: emit only text and children.
        if text:
            write(_escape_cdata(text))
        for e in elem:
            _serialize_html(write, e, format)
    else:
        namespace_uri = None
        if isinstance(tag, QName):
            # QNAME objects store their data as a string: `{uri}tag`
            if tag.text[:1] == "{":
                namespace_uri, tag = tag.text[1:].split("}", 1)
            else:
                raise ValueError('QName objects must define a tag.')
        write("<" + tag)
        items = elem.items()
        if items:
            items = sorted(items)  # lexical order
            for k, v in items:
                if isinstance(k, QName):
                    # Assume a text only QName
                    k = k.text
                if isinstance(v, QName):
                    # Assume a text only QName
                    v = v.text
                else:
                    v = _escape_attrib_html(v)
                if k == v and format == 'html':
                    # handle boolean attributes
                    write(" %s" % v)
                else:
                    write(' {}="{}"'.format(k, v))
        if namespace_uri:
            write(' xmlns="%s"' % (_escape_attrib(namespace_uri)))
        if format == "xhtml" and tag.lower() in HTML_EMPTY:
            write(" />")
        else:
            write(">")
            if text:
                if tag.lower() in ["script", "style"]:
                    # Script/style contents must not be entity-escaped.
                    write(text)
                else:
                    write(_escape_cdata(text))
            for e in elem:
                _serialize_html(write, e, format)
            if tag.lower() not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail))


def _write_html(root, format="html"):
    """Serialize `root` to a single string in the given format."""
    assert root is not None
    data = []
    write = data.append
    _serialize_html(write, root, format)
    return "".join(data)


# --------------------------------------------------------------------
# public functions

def to_html_string(element):
    """Serialize `element` and its subtree as an HTML fragment string."""
    return _write_html(ElementTree(element).getroot(), format="html")


def to_xhtml_string(element):
    """Serialize `element` and its subtree as an XHTML fragment string."""
    return _write_html(ElementTree(element).getroot(), format="xhtml")
+ """ + + default_kwargs = {} + + def assertMarkdownRenders(self, source, expected, expected_attrs=None, **kwargs): + """ + Test that source Markdown text renders to expected output with given keywords. + + `expected_attrs` accepts a dict. Each key should be the name of an attribute + on the `Markdown` instance and the value should be the expected value after + the source text is parsed by Markdown. After the expected output is tested, + the expected value for each attribute is compared against the actual + attribute of the `Markdown` instance using `TestCase.assertEqual`. + """ + + expected_attrs = expected_attrs or {} + kws = self.default_kwargs.copy() + kws.update(kwargs) + md = Markdown(**kws) + output = md.convert(source) + self.assertMultiLineEqual(output, expected) + for key, value in expected_attrs.items(): + self.assertEqual(getattr(md, key), value) + + def dedent(self, text): + """ + Dedent text. + """ + + # TODO: If/when actual output ends with a newline, then use: + # return textwrap.dedent(text.strip('/n')) + return textwrap.dedent(text).strip() + + +class recursionlimit: + """ + A context manager which temporarily modifies the Python recursion limit. + + The testing framework, coverage, etc. may add an arbitrary number of levels to the depth. To maintain consistency + in the tests, the current stack depth is determined when called, then added to the provided limit. + + Example usage: + + with recursionlimit(20): + # test code here + + See https://stackoverflow.com/a/50120316/866026 + """ + + def __init__(self, limit): + self.limit = util._get_stack_depth() + limit + self.old_limit = sys.getrecursionlimit() + + def __enter__(self): + sys.setrecursionlimit(self.limit) + + def __exit__(self, type, value, tb): + sys.setrecursionlimit(self.old_limit) + + +######################### +# Legacy Test Framework # +######################### + + +class Kwargs(dict): + """ A dict like class for holding keyword arguments. 
""" + pass + + +def _normalize_whitespace(text): + """ Normalize whitespace for a string of html using tidylib. """ + output, errors = tidylib.tidy_fragment(text, options={ + 'drop_empty_paras': 0, + 'fix_backslash': 0, + 'fix_bad_comments': 0, + 'fix_uri': 0, + 'join_styles': 0, + 'lower_literals': 0, + 'merge_divs': 0, + 'output_xhtml': 1, + 'quote_ampersand': 0, + 'newline': 'LF' + }) + return output + + +class LegacyTestMeta(type): + def __new__(cls, name, bases, dct): + + def generate_test(infile, outfile, normalize, kwargs): + def test(self): + with open(infile, encoding="utf-8") as f: + input = f.read() + with open(outfile, encoding="utf-8") as f: + # Normalize line endings + # (on Windows, git may have altered line endings). + expected = f.read().replace("\r\n", "\n") + output = markdown(input, **kwargs) + if tidylib and normalize: + try: + expected = _normalize_whitespace(expected) + output = _normalize_whitespace(output) + except OSError: + self.skipTest("Tidylib's c library not available.") + elif normalize: + self.skipTest('Tidylib not available.') + self.assertMultiLineEqual(output, expected) + return test + + location = dct.get('location', '') + exclude = dct.get('exclude', []) + normalize = dct.get('normalize', False) + input_ext = dct.get('input_ext', '.txt') + output_ext = dct.get('output_ext', '.html') + kwargs = dct.get('default_kwargs', Kwargs()) + + if os.path.isdir(location): + for file in os.listdir(location): + infile = os.path.join(location, file) + if os.path.isfile(infile): + tname, ext = os.path.splitext(file) + if ext == input_ext: + outfile = os.path.join(location, tname + output_ext) + tname = tname.replace(' ', '_').replace('-', '_') + kws = kwargs.copy() + if tname in dct: + kws.update(dct[tname]) + test_name = 'test_%s' % tname + if tname not in exclude: + dct[test_name] = generate_test(infile, outfile, normalize, kws) + else: + dct[test_name] = unittest.skip('Excluded')(lambda: None) + + return type.__new__(cls, name, bases, dct) 
class LegacyTestCase(unittest.TestCase, metaclass=LegacyTestMeta):
    """
    A `unittest.TestCase` subclass for running Markdown's legacy file-based tests.

    A subclass should define various properties which point to a directory of
    text-based test files and define various behaviors/defaults for those tests.
    The following properties are supported:

    location: A path to the directory of test files. An absolute path is preferred.
    exclude: A list of tests to exclude. Each test name should comprise the filename
        without an extension.
    normalize: A boolean value indicating if the HTML should be normalized.
        Default: `False`.
    input_ext: A string containing the file extension of input files. Default: `.txt`.
    output_ext: A string containing the file extension of expected output files.
        Default: `.html`.
    default_kwargs: A `Kwargs` instance which stores the default set of keyword
        arguments for all test files in the directory.

    In addition, properties can be defined for each individual set of test files within
    the directory. The property should be given the name of the file without the file
    extension. Any spaces and dashes in the filename should be replaced with
    underscores. The value of the property should be a `Kwargs` instance which
    contains the keyword arguments that should be passed to `Markdown` for that
    test file. The keyword arguments will "update" the `default_kwargs`.

    When the class instance is created, it will walk the given directory and create
    a separate unittest for each set of test files using the naming scheme:
    `test_filename`. One unittest will be run for each set of input and output files.
    """
    pass


"""
Python Markdown

A Python implementation of John Gruber's Markdown.

Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/

Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).

Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)

License: BSD (see LICENSE.md for details).
"""

import re
import xml.etree.ElementTree as etree
from . import util
from . import inlinepatterns


def build_treeprocessors(md, **kwargs):
    """ Build the default treeprocessors for Markdown. """
    treeprocessors = util.Registry()
    treeprocessors.register(InlineProcessor(md), 'inline', 20)
    treeprocessors.register(PrettifyTreeprocessor(md), 'prettify', 10)
    treeprocessors.register(UnescapeTreeprocessor(md), 'unescape', 0)
    return treeprocessors


def isString(s):
    """ Check if it's string """
    # AtomicStrings are deliberately excluded: they are final and must
    # not be fed back through inline parsing.
    if not isinstance(s, util.AtomicString):
        return isinstance(s, str)
    return False


class Treeprocessor(util.Processor):
    """
    Treeprocessors are run on the ElementTree object before serialization.

    Each Treeprocessor implements a "run" method that takes a pointer to an
    ElementTree, modifies it as necessary and returns an ElementTree
    object.

    Treeprocessors must extend markdown.Treeprocessor.

    """
    def run(self, root):
        """
        Subclasses of Treeprocessor should implement a `run` method, which
        takes a root ElementTree. This method can return another ElementTree
        object, and the existing root ElementTree will be replaced, or it can
        modify the current tree and return None.
        """
        pass  # pragma: no cover
+ """ + pass # pragma: no cover + + +class InlineProcessor(Treeprocessor): + """ + A Treeprocessor that traverses a tree, applying inline patterns. + """ + + def __init__(self, md): + self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX + self.__placeholder_suffix = util.ETX + self.__placeholder_length = 4 + len(self.__placeholder_prefix) \ + + len(self.__placeholder_suffix) + self.__placeholder_re = util.INLINE_PLACEHOLDER_RE + self.md = md + self.inlinePatterns = md.inlinePatterns + self.ancestors = [] + + def __makePlaceholder(self, type): + """ Generate a placeholder """ + id = "%04d" % len(self.stashed_nodes) + hash = util.INLINE_PLACEHOLDER % id + return hash, id + + def __findPlaceholder(self, data, index): + """ + Extract id from data string, start from index + + Keyword arguments: + + * data: string + * index: index, from which we start search + + Returns: placeholder id and string index, after the found placeholder. + + """ + m = self.__placeholder_re.search(data, index) + if m: + return m.group(1), m.end() + else: + return None, index + 1 + + def __stashNode(self, node, type): + """ Add node to stash """ + placeholder, id = self.__makePlaceholder(type) + self.stashed_nodes[id] = node + return placeholder + + def __handleInline(self, data, patternIndex=0): + """ + Process string with inline patterns and replace it + with placeholders + + Keyword arguments: + + * data: A line of Markdown text + * patternIndex: The index of the inlinePattern to start with + + Returns: String with placeholders. 
+ + """ + if not isinstance(data, util.AtomicString): + startIndex = 0 + count = len(self.inlinePatterns) + while patternIndex < count: + data, matched, startIndex = self.__applyPattern( + self.inlinePatterns[patternIndex], data, patternIndex, startIndex + ) + if not matched: + patternIndex += 1 + return data + + def __processElementText(self, node, subnode, isText=True): + """ + Process placeholders in Element.text or Element.tail + of Elements popped from self.stashed_nodes. + + Keywords arguments: + + * node: parent node + * subnode: processing node + * isText: bool variable, True - it's text, False - it's tail + + Returns: None + + """ + if isText: + text = subnode.text + subnode.text = None + else: + text = subnode.tail + subnode.tail = None + + childResult = self.__processPlaceholders(text, subnode, isText) + + if not isText and node is not subnode: + pos = list(node).index(subnode) + 1 + else: + pos = 0 + + childResult.reverse() + for newChild in childResult: + node.insert(pos, newChild[0]) + + def __processPlaceholders(self, data, parent, isText=True): + """ + Process string with placeholders and generate ElementTree tree. + + Keyword arguments: + + * data: string with placeholders instead of ElementTree elements. + * parent: Element, which contains processing inline data + + Returns: list with ElementTree elements with applied inline patterns. 
+ + """ + def linkText(text): + if text: + if result: + if result[-1][0].tail: + result[-1][0].tail += text + else: + result[-1][0].tail = text + elif not isText: + if parent.tail: + parent.tail += text + else: + parent.tail = text + else: + if parent.text: + parent.text += text + else: + parent.text = text + result = [] + strartIndex = 0 + while data: + index = data.find(self.__placeholder_prefix, strartIndex) + if index != -1: + id, phEndIndex = self.__findPlaceholder(data, index) + + if id in self.stashed_nodes: + node = self.stashed_nodes.get(id) + + if index > 0: + text = data[strartIndex:index] + linkText(text) + + if not isString(node): # it's Element + for child in [node] + list(node): + if child.tail: + if child.tail.strip(): + self.__processElementText( + node, child, False + ) + if child.text: + if child.text.strip(): + self.__processElementText(child, child) + else: # it's just a string + linkText(node) + strartIndex = phEndIndex + continue + + strartIndex = phEndIndex + result.append((node, self.ancestors[:])) + + else: # wrong placeholder + end = index + len(self.__placeholder_prefix) + linkText(data[strartIndex:end]) + strartIndex = end + else: + text = data[strartIndex:] + if isinstance(data, util.AtomicString): + # We don't want to loose the AtomicString + text = util.AtomicString(text) + linkText(text) + data = "" + + return result + + def __applyPattern(self, pattern, data, patternIndex, startIndex=0): + """ + Check if the line fits the pattern, create the necessary + elements, add it to stashed_nodes. + + Keyword arguments: + + * data: the text to be processed + * pattern: the pattern to be checked + * patternIndex: index of current pattern + * startIndex: string index, from which we start searching + + Returns: String with placeholders instead of ElementTree elements. 
+ + """ + new_style = isinstance(pattern, inlinepatterns.InlineProcessor) + + for exclude in pattern.ANCESTOR_EXCLUDES: + if exclude.lower() in self.ancestors: + return data, False, 0 + + if new_style: + match = None + # Since handleMatch may reject our first match, + # we iterate over the buffer looking for matches + # until we can't find any more. + for match in pattern.getCompiledRegExp().finditer(data, startIndex): + node, start, end = pattern.handleMatch(match, data) + if start is None or end is None: + startIndex += match.end(0) + match = None + continue + break + else: # pragma: no cover + match = pattern.getCompiledRegExp().match(data[startIndex:]) + leftData = data[:startIndex] + + if not match: + return data, False, 0 + + if not new_style: # pragma: no cover + node = pattern.handleMatch(match) + start = match.start(0) + end = match.end(0) + + if node is None: + return data, True, end + + if not isString(node): + if not isinstance(node.text, util.AtomicString): + # We need to process current node too + for child in [node] + list(node): + if not isString(node): + if child.text: + self.ancestors.append(child.tag.lower()) + child.text = self.__handleInline( + child.text, patternIndex + 1 + ) + self.ancestors.pop() + if child.tail: + child.tail = self.__handleInline( + child.tail, patternIndex + ) + + placeholder = self.__stashNode(node, pattern.type()) + + if new_style: + return "{}{}{}".format(data[:start], + placeholder, data[end:]), True, 0 + else: # pragma: no cover + return "{}{}{}{}".format(leftData, + match.group(1), + placeholder, match.groups()[-1]), True, 0 + + def __build_ancestors(self, parent, parents): + """Build the ancestor list.""" + ancestors = [] + while parent is not None: + if parent is not None: + ancestors.append(parent.tag.lower()) + parent = self.parent_map.get(parent) + ancestors.reverse() + parents.extend(ancestors) + + def run(self, tree, ancestors=None): + """Apply inline patterns to a parsed Markdown tree. 
+ + Iterate over ElementTree, find elements with inline tag, apply inline + patterns and append newly created Elements to tree. If you don't + want to process your data with inline patterns, instead of normal + string, use subclass AtomicString: + + node.text = markdown.AtomicString("This will not be processed.") + + Arguments: + + * tree: ElementTree object, representing Markdown tree. + * ancestors: List of parent tag names that precede the tree node (if needed). + + Returns: ElementTree object with applied inline patterns. + + """ + self.stashed_nodes = {} + + # Ensure a valid parent list, but copy passed in lists + # to ensure we don't have the user accidentally change it on us. + tree_parents = [] if ancestors is None else ancestors[:] + + self.parent_map = {c: p for p in tree.iter() for c in p} + stack = [(tree, tree_parents)] + + while stack: + currElement, parents = stack.pop() + + self.ancestors = parents + self.__build_ancestors(currElement, self.ancestors) + + insertQueue = [] + for child in currElement: + if child.text and not isinstance( + child.text, util.AtomicString + ): + self.ancestors.append(child.tag.lower()) + text = child.text + child.text = None + lst = self.__processPlaceholders( + self.__handleInline(text), child + ) + for item in lst: + self.parent_map[item[0]] = child + stack += lst + insertQueue.append((child, lst)) + self.ancestors.pop() + if child.tail: + tail = self.__handleInline(child.tail) + dumby = etree.Element('d') + child.tail = None + tailResult = self.__processPlaceholders(tail, dumby, False) + if dumby.tail: + child.tail = dumby.tail + pos = list(currElement).index(child) + 1 + tailResult.reverse() + for newChild in tailResult: + self.parent_map[newChild[0]] = currElement + currElement.insert(pos, newChild[0]) + if len(child): + self.parent_map[child] = currElement + stack.append((child, self.ancestors[:])) + + for element, lst in insertQueue: + for i, obj in enumerate(lst): + newChild = obj[0] + element.insert(i, newChild) 
+ return tree + + +class PrettifyTreeprocessor(Treeprocessor): + """ Add linebreaks to the html document. """ + + def _prettifyETree(self, elem): + """ Recursively add linebreaks to ElementTree children. """ + + i = "\n" + if self.md.is_block_level(elem.tag) and elem.tag not in ['code', 'pre']: + if (not elem.text or not elem.text.strip()) \ + and len(elem) and self.md.is_block_level(elem[0].tag): + elem.text = i + for e in elem: + if self.md.is_block_level(e.tag): + self._prettifyETree(e) + if not elem.tail or not elem.tail.strip(): + elem.tail = i + + def run(self, root): + """ Add linebreaks to ElementTree root object. """ + + self._prettifyETree(root) + # Do <br />'s separately as they are often in the middle of + # inline content and missed by _prettifyETree. + brs = root.iter('br') + for br in brs: + if not br.tail or not br.tail.strip(): + br.tail = '\n' + else: + br.tail = '\n%s' % br.tail + # Clean up extra empty lines at end of code blocks. + pres = root.iter('pre') + for pre in pres: + if len(pre) and pre[0].tag == 'code': + code = pre[0] + # Only prettify code containing text only + if not len(code) and code.text is not None: + code.text = util.AtomicString(code.text.rstrip() + '\n') + + +class UnescapeTreeprocessor(Treeprocessor): + """ Restore escaped chars """ + + RE = re.compile(r'{}(\d+){}'.format(util.STX, util.ETX)) + + def _unescape(self, m): + return chr(int(m.group(1))) + + def unescape(self, text): + return self.RE.sub(self._unescape, text) + + def run(self, root): + """ Loop over all elements and unescape all text. 
""" + for elem in root.iter(): + # Unescape text content + if elem.text and not elem.tag == 'code': + elem.text = self.unescape(elem.text) + # Unescape tail content + if elem.tail: + elem.tail = self.unescape(elem.tail) + # Unescape attribute values + for key, value in elem.items(): + elem.set(key, self.unescape(value)) diff --git a/SSG/markdown/util.py b/SSG/markdown/util.py new file mode 100644 index 0000000..e6b08e5 --- /dev/null +++ b/SSG/markdown/util.py @@ -0,0 +1,358 @@ +""" +Python Markdown + +A Python implementation of John Gruber's Markdown. + +Documentation: https://python-markdown.github.io/ +GitHub: https://github.com/Python-Markdown/markdown/ +PyPI: https://pypi.org/project/Markdown/ + +Started by Manfred Stienstra (http://www.dwerg.net/). +Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +Currently maintained by Waylan Limberg (https://github.com/waylan), +Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +License: BSD (see LICENSE.md for details). +""" + +import re +import sys +import warnings +from collections import namedtuple +from functools import wraps, lru_cache +from itertools import count + + +""" +Constants you might want to modify +----------------------------------------------------------------------------- +""" + + +BLOCK_LEVEL_ELEMENTS = [ + # Elements which are invalid to wrap in a `<p>` tag. 
+ # See https://w3c.github.io/html/grouping-content.html#the-p-element + 'address', 'article', 'aside', 'blockquote', 'details', 'div', 'dl', + 'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3', + 'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'main', 'menu', 'nav', 'ol', + 'p', 'pre', 'section', 'table', 'ul', + # Other elements which Markdown should not be mucking up the contents of. + 'canvas', 'colgroup', 'dd', 'body', 'dt', 'group', 'iframe', 'li', 'legend', + 'math', 'map', 'noscript', 'output', 'object', 'option', 'progress', 'script', + 'style', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'video' +] + +# Placeholders +STX = '\u0002' # Use STX ("Start of text") for start-of-placeholder +ETX = '\u0003' # Use ETX ("End of text") for end-of-placeholder +INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:" +INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX +INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]+)') +AMP_SUBSTITUTE = STX+"amp"+ETX +HTML_PLACEHOLDER = STX + "wzxhzdk:%s" + ETX +HTML_PLACEHOLDER_RE = re.compile(HTML_PLACEHOLDER % r'([0-9]+)') +TAG_PLACEHOLDER = STX + "hzzhzkh:%s" + ETX + + +""" +Constants you probably do not need to change +----------------------------------------------------------------------------- +""" + +RTL_BIDI_RANGES = ( + ('\u0590', '\u07FF'), + # Hebrew (0590-05FF), Arabic (0600-06FF), + # Syriac (0700-074F), Arabic supplement (0750-077F), + # Thaana (0780-07BF), Nko (07C0-07FF). + ('\u2D30', '\u2D7F') # Tifinagh +) + + +""" +AUXILIARY GLOBAL FUNCTIONS +============================================================================= +""" + + +@lru_cache(maxsize=None) +def get_installed_extensions(): + if sys.version_info >= (3, 10): + from importlib import metadata + else: # <PY310 use backport + import importlib_metadata as metadata + # Only load extension entry_points once. 
+ return metadata.entry_points(group='markdown.extensions') + + +def deprecated(message, stacklevel=2): + """ + Raise a DeprecationWarning when wrapped function/method is called. + + Usage: + @deprecated("This method will be removed in version X; use Y instead.") + def some_method()" + pass + """ + def wrapper(func): + @wraps(func) + def deprecated_func(*args, **kwargs): + warnings.warn( + f"'{func.__name__}' is deprecated. {message}", + category=DeprecationWarning, + stacklevel=stacklevel + ) + return func(*args, **kwargs) + return deprecated_func + return wrapper + + +def parseBoolValue(value, fail_on_errors=True, preserve_none=False): + """Parses a string representing bool value. If parsing was successful, + returns True or False. If preserve_none=True, returns True, False, + or None. If parsing was not successful, raises ValueError, or, if + fail_on_errors=False, returns None.""" + if not isinstance(value, str): + if preserve_none and value is None: + return value + return bool(value) + elif preserve_none and value.lower() == 'none': + return None + elif value.lower() in ('true', 'yes', 'y', 'on', '1'): + return True + elif value.lower() in ('false', 'no', 'n', 'off', '0', 'none'): + return False + elif fail_on_errors: + raise ValueError('Cannot parse bool value: %r' % value) + + +def code_escape(text): + """Escape code.""" + if "&" in text: + text = text.replace("&", "&") + if "<" in text: + text = text.replace("<", "<") + if ">" in text: + text = text.replace(">", ">") + return text + + +def _get_stack_depth(size=2): + """Get current stack depth, performantly. 
+ """ + frame = sys._getframe(size) + + for size in count(size): + frame = frame.f_back + if not frame: + return size + + +def nearing_recursion_limit(): + """Return true if current stack depth is within 100 of maximum limit.""" + return sys.getrecursionlimit() - _get_stack_depth() < 100 + + +""" +MISC AUXILIARY CLASSES +============================================================================= +""" + + +class AtomicString(str): + """A string which should not be further processed.""" + pass + + +class Processor: + def __init__(self, md=None): + self.md = md + + +class HtmlStash: + """ + This class is used for stashing HTML objects that we extract + in the beginning and replace with place-holders. + """ + + def __init__(self): + """ Create a HtmlStash. """ + self.html_counter = 0 # for counting inline html segments + self.rawHtmlBlocks = [] + self.tag_counter = 0 + self.tag_data = [] # list of dictionaries in the order tags appear + + def store(self, html): + """ + Saves an HTML segment for later reinsertion. Returns a + placeholder string that needs to be inserted into the + document. + + Keyword arguments: + + * html: an html segment + + Returns : a placeholder string + + """ + self.rawHtmlBlocks.append(html) + placeholder = self.get_placeholder(self.html_counter) + self.html_counter += 1 + return placeholder + + def reset(self): + self.html_counter = 0 + self.rawHtmlBlocks = [] + + def get_placeholder(self, key): + return HTML_PLACEHOLDER % key + + def store_tag(self, tag, attrs, left_index, right_index): + """Store tag data and return a placeholder.""" + self.tag_data.append({'tag': tag, 'attrs': attrs, + 'left_index': left_index, + 'right_index': right_index}) + placeholder = TAG_PLACEHOLDER % str(self.tag_counter) + self.tag_counter += 1 # equal to the tag's index in self.tag_data + return placeholder + + +# Used internally by `Registry` for each item in its sorted list. +# Provides an easier to read API when editing the code later. 
+# For example, `item.name` is more clear than `item[0]`. +_PriorityItem = namedtuple('PriorityItem', ['name', 'priority']) + + +class Registry: + """ + A priority sorted registry. + + A `Registry` instance provides two public methods to alter the data of the + registry: `register` and `deregister`. Use `register` to add items and + `deregister` to remove items. See each method for specifics. + + When registering an item, a "name" and a "priority" must be provided. All + items are automatically sorted by "priority" from highest to lowest. The + "name" is used to remove ("deregister") and get items. + + A `Registry` instance it like a list (which maintains order) when reading + data. You may iterate over the items, get an item and get a count (length) + of all items. You may also check that the registry contains an item. + + When getting an item you may use either the index of the item or the + string-based "name". For example: + + registry = Registry() + registry.register(SomeItem(), 'itemname', 20) + # Get the item by index + item = registry[0] + # Get the item by name + item = registry['itemname'] + + When checking that the registry contains an item, you may use either the + string-based "name", or a reference to the actual item. For example: + + someitem = SomeItem() + registry.register(someitem, 'itemname', 20) + # Contains the name + assert 'itemname' in registry + # Contains the item instance + assert someitem in registry + + The method `get_index_for_name` is also available to obtain the index of + an item using that item's assigned "name". + """ + + def __init__(self): + self._data = {} + self._priority = [] + self._is_sorted = False + + def __contains__(self, item): + if isinstance(item, str): + # Check if an item exists by this name. + return item in self._data.keys() + # Check if this instance exists. 
+ return item in self._data.values() + + def __iter__(self): + self._sort() + return iter([self._data[k] for k, p in self._priority]) + + def __getitem__(self, key): + self._sort() + if isinstance(key, slice): + data = Registry() + for k, p in self._priority[key]: + data.register(self._data[k], k, p) + return data + if isinstance(key, int): + return self._data[self._priority[key].name] + return self._data[key] + + def __len__(self): + return len(self._priority) + + def __repr__(self): + return '<{}({})>'.format(self.__class__.__name__, list(self)) + + def get_index_for_name(self, name): + """ + Return the index of the given name. + """ + if name in self: + self._sort() + return self._priority.index( + [x for x in self._priority if x.name == name][0] + ) + raise ValueError('No item named "{}" exists.'.format(name)) + + def register(self, item, name, priority): + """ + Add an item to the registry with the given name and priority. + + Parameters: + + * `item`: The item being registered. + * `name`: A string used to reference the item. + * `priority`: An integer or float used to sort against all items. + + If an item is registered with a "name" which already exists, the + existing item is replaced with the new item. Treat carefully as the + old item is lost with no way to recover it. The new item will be + sorted according to its priority and will **not** retain the position + of the old item. + """ + if name in self: + # Remove existing item of same name first + self.deregister(name) + self._is_sorted = False + self._data[name] = item + self._priority.append(_PriorityItem(name, priority)) + + def deregister(self, name, strict=True): + """ + Remove an item from the registry. + + Set `strict=False` to fail silently. + """ + try: + index = self.get_index_for_name(name) + del self._priority[index] + del self._data[name] + except ValueError: + if strict: + raise + + def _sort(self): + """ + Sort the registry by priority from highest to lowest. 
+ + This method is called internally and should never be explicitly called. + """ + if not self._is_sorted: + self._priority.sort(key=lambda item: item.priority, reverse=True) + self._is_sorted = True diff --git a/SSG/ssg.py b/SSG/ssg.py index 0c70802..1c62485 100644 --- a/SSG/ssg.py +++ b/SSG/ssg.py @@ -1,8 +1,4 @@ -from asyncio.windows_events import NULL -from lib2to3.pgen2.token import EQUAL -import string import sys -from tokenize import String from utils.help import * from utils.input import parseInput, readConfigFile @@ -54,7 +50,7 @@ def checkIfParamExists(i:int): elif len(inputConfigFile): readConfigFile(inputConfigFile) elif len(inputPath): - parseInput(inputPath,inputLang); + parseInput(inputPath,inputLang) else: raise SystemExit(f"No arguments were passed if unsure about which aurgument are avialable use -h or --help") except SystemExit as err: diff --git a/SSG/utils/input.py b/SSG/utils/input.py index 272b868..9dd2d64 100644 --- a/SSG/utils/input.py +++ b/SSG/utils/input.py @@ -1,15 +1,9 @@ -from ast import parse -from asyncio.windows_events import NULL -from distutils import file_util import json from os.path import exists import os -from pickletools import string1 import shutil import codecs -import string -from tkinter.tix import Tree -from tokenize import String +import markdown def parseInput(arg,lang="en-CA"): global newDir #Creates a new Directory for the output @@ -76,6 +70,8 @@ def parseFile(arg): <html lang=''' + newlang + '''> <head> <meta charset="utf-8"> +<meta name="description" content="P-DR0ZD Static Site Generator ''' + fileName + ''' Page "> +<meta name="robots" content="noindex, nofollow" /> <title>''' + fileName + ''' @@ -98,69 +94,14 @@ def parseFile(arg): site.write(footer) #Finishes the document with a body elif os.path.splitext(arg)[1] == ".md": - site = codecs.open(fullName, "w",encoding="utf-8") + site = codecs.open(fullName, "w", encoding="utf-8") site.write(header) - for line in lines[1:]: #Loops through the list to fill out 
the html - if line != "": - site.write(parseMarkdown(line)) - else: - site.write('

\n

') - + #Using The markdown parser requiers the list to be in a string format so i used join + site.write(markdown.markdown("\n ".join(lines[1:]))) site.write(footer) #Finishes the document with a body - - - else: print(f"Unable to Proccses " + arg + " because its not a text file") - -def parseMarkdown(md:str): - htmlStr = "" - if md.startswith("# "): #implements heading 1 conversion. - htmlStr = "

" + md.replace("# ","") + "

" - elif(len(md.strip()) != 0): - htmlStr = md - - search = '*' - # searching for position of * - index = md.find(search) - lastIndex = 0 - if index != -1: - lastIndex = searchStr(search, index, md) - # Adding Italics in markdown - if index != -1 and lastIndex != 0: - md = replace("", lastIndex, md) - md = replace("", index, md) - htmlStr = md - - search = '`' - # searching for position of * - index = md.find(search) - lastIndex = 0 - if index != -1: - lastIndex = searchStr(search, index, md) - # Adding Italics in markdown - if index != -1 and lastIndex != 0: - md = replace("", lastIndex, md) - md = replace("", index, md) - htmlStr = md - - htmlStr += " " #adding a space to the end of the line - return htmlStr - -def searchStr(search, index, md:str): - # searching for position of search variable - lastIndex = 0 - if index != -1: - for char_index in range(index, len(md)): - if md[char_index] == search: - lastIndex = char_index - return lastIndex - -def replace(replace:str, index, md:str): - md = md[:index] + replace + md[index+1:] # Have to do the last position first or else it messes with the first indexmd - return md - def parseDirectory(arg): allFiles = os.listdir(arg) #Grabs everything in the directory into a list @@ -186,6 +127,8 @@ def createIndex(): + + Index