s464967 2024-05-26 19:49:15 +02:00
parent 829f991f35
commit 8af59a8246
26953 changed files with 7326748 additions and 0 deletions


@ -0,0 +1 @@
pip


@ -0,0 +1,30 @@
BSD 3-Clause License
Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -0,0 +1,146 @@
Metadata-Version: 2.1
Name: Markdown
Version: 3.6
Summary: Python implementation of John Gruber's Markdown.
Author: Manfred Stienstra, Yuri Takhteyev
Author-email: Waylan Limberg <python.markdown@gmail.com>
Maintainer: Isaac Muse
Maintainer-email: Waylan Limberg <python.markdown@gmail.com>
License: BSD 3-Clause License
Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Project-URL: Homepage, https://Python-Markdown.github.io/
Project-URL: Documentation, https://Python-Markdown.github.io/
Project-URL: Repository, https://github.com/Python-Markdown/markdown
Project-URL: Issue Tracker, https://github.com/Python-Markdown/markdown/issues
Project-URL: Changelog, https://python-markdown.github.io/changelog/
Keywords: markdown,markdown-parser,python-markdown,markdown-to-html
Classifier: Development Status :: 5 - Production/Stable
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Communications :: Email :: Filters
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries
Classifier: Topic :: Internet :: WWW/HTTP :: Site Management
Classifier: Topic :: Software Development :: Documentation
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Filters
Classifier: Topic :: Text Processing :: Markup :: HTML
Classifier: Topic :: Text Processing :: Markup :: Markdown
Requires-Python: >=3.8
Description-Content-Type: text/markdown
License-File: LICENSE.md
Requires-Dist: importlib-metadata >=4.4 ; python_version < "3.10"
Provides-Extra: docs
Requires-Dist: mkdocs >=1.5 ; extra == 'docs'
Requires-Dist: mkdocs-nature >=0.6 ; extra == 'docs'
Requires-Dist: mdx-gh-links >=0.2 ; extra == 'docs'
Requires-Dist: mkdocstrings[python] ; extra == 'docs'
Requires-Dist: mkdocs-gen-files ; extra == 'docs'
Requires-Dist: mkdocs-section-index ; extra == 'docs'
Requires-Dist: mkdocs-literate-nav ; extra == 'docs'
Provides-Extra: testing
Requires-Dist: coverage ; extra == 'testing'
Requires-Dist: pyyaml ; extra == 'testing'
[Python-Markdown][]
===================
[![Build Status][build-button]][build]
[![Coverage Status][codecov-button]][codecov]
[![Latest Version][mdversion-button]][md-pypi]
[![Python Versions][pyversion-button]][md-pypi]
[![BSD License][bsdlicense-button]][bsdlicense]
[![Code of Conduct][codeofconduct-button]][Code of Conduct]
[build-button]: https://github.com/Python-Markdown/markdown/workflows/CI/badge.svg?event=push
[build]: https://github.com/Python-Markdown/markdown/actions?query=workflow%3ACI+event%3Apush
[codecov-button]: https://codecov.io/gh/Python-Markdown/markdown/branch/master/graph/badge.svg
[codecov]: https://codecov.io/gh/Python-Markdown/markdown
[mdversion-button]: https://img.shields.io/pypi/v/Markdown.svg
[md-pypi]: https://pypi.org/project/Markdown/
[pyversion-button]: https://img.shields.io/pypi/pyversions/Markdown.svg
[bsdlicense-button]: https://img.shields.io/badge/license-BSD-yellow.svg
[bsdlicense]: https://opensource.org/licenses/BSD-3-Clause
[codeofconduct-button]: https://img.shields.io/badge/code%20of%20conduct-contributor%20covenant-green.svg?style=flat-square
[Code of Conduct]: https://github.com/Python-Markdown/markdown/blob/master/CODE_OF_CONDUCT.md
This is a Python implementation of John Gruber's [Markdown][].
It is almost completely compliant with the reference implementation,
though there are a few known issues. See [Features][] for information
on what exactly is supported and what is not. Additional features are
supported by the [Available Extensions][].
[Python-Markdown]: https://Python-Markdown.github.io/
[Markdown]: https://daringfireball.net/projects/markdown/
[Features]: https://Python-Markdown.github.io#Features
[Available Extensions]: https://Python-Markdown.github.io/extensions
Documentation
-------------
```bash
pip install markdown
```
```python
import markdown
html = markdown.markdown(your_text_string)
```
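Extensions can be turned on by passing their names (the same names registered as `markdown.extensions` entry points) to `markdown.markdown()`. A brief sketch using the built-in `tables` extension:

```python
import markdown

table_text = """
| Name | Value |
| ---- | ----- |
| pi   | 3.14  |
"""

# extension names correspond to the built-in entry points, e.g. "tables", "toc"
html = markdown.markdown(table_text, extensions=["tables"])
print(html)  # the pipe table is rendered as an HTML <table>
```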
For more advanced [installation] and [usage] documentation, see the `docs/` directory
of the distribution or the project website at <https://Python-Markdown.github.io/>.
[installation]: https://python-markdown.github.io/install/
[usage]: https://python-markdown.github.io/reference/
See the change log at <https://python-markdown.github.io/changelog/>.
Support
-------
You may report bugs, ask for help, and discuss various other issues on the [bug tracker][].
[bug tracker]: https://github.com/Python-Markdown/markdown/issues
Code of Conduct
---------------
Everyone interacting in the Python-Markdown project's code bases, issue trackers,
and mailing lists is expected to follow the [Code of Conduct].


@ -0,0 +1,74 @@
../../Scripts/markdown_py.exe,sha256=oyzRuTfpmvyLeljsFBt_JQ9l3i5j64Gl5yrqOP4ZSw0,108386
Markdown-3.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
Markdown-3.6.dist-info/LICENSE.md,sha256=e6TrbRCzKy0R3OE4ITQDUc27swuozMZ4Qdsv_Ybnmso,1650
Markdown-3.6.dist-info/METADATA,sha256=8_ETqzTxcOemQXj7ujUabMFcDBDGtsRrccFDr1-XWvc,7040
Markdown-3.6.dist-info/RECORD,,
Markdown-3.6.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
Markdown-3.6.dist-info/entry_points.txt,sha256=lMEyiiA_ZZyfPCBlDviBl-SiU0cfoeuEKpwxw361sKQ,1102
Markdown-3.6.dist-info/top_level.txt,sha256=IAxs8x618RXoH1uCqeLLxXsDefJvE_mIibr_M4sOlyk,9
markdown/__init__.py,sha256=dfzwwdpG9L8QLEPBpLFPIHx_BN056aZXp9xZifTxYIU,1777
markdown/__main__.py,sha256=innFBxRqwPBNxG1zhKktJji4bnRKtVyYYd30ID13Tcw,5859
markdown/__meta__.py,sha256=DqtqnYYLznrkvI1G4JalBc4WpgOp48naNoG9zlMWZas,1712
markdown/__pycache__/__init__.cpython-311.pyc,,
markdown/__pycache__/__main__.cpython-311.pyc,,
markdown/__pycache__/__meta__.cpython-311.pyc,,
markdown/__pycache__/blockparser.cpython-311.pyc,,
markdown/__pycache__/blockprocessors.cpython-311.pyc,,
markdown/__pycache__/core.cpython-311.pyc,,
markdown/__pycache__/htmlparser.cpython-311.pyc,,
markdown/__pycache__/inlinepatterns.cpython-311.pyc,,
markdown/__pycache__/postprocessors.cpython-311.pyc,,
markdown/__pycache__/preprocessors.cpython-311.pyc,,
markdown/__pycache__/serializers.cpython-311.pyc,,
markdown/__pycache__/test_tools.cpython-311.pyc,,
markdown/__pycache__/treeprocessors.cpython-311.pyc,,
markdown/__pycache__/util.cpython-311.pyc,,
markdown/blockparser.py,sha256=j4CQImVpiq7g9pz8wCxvzT61X_T2iSAjXupHJk8P3eA,5728
markdown/blockprocessors.py,sha256=koY5rq8DixzBCHcquvZJp6x2JYyBGjrwxMWNZhd6D2U,27013
markdown/core.py,sha256=DyyzDsmd-KcuEp8ZWUKJAeUCt7B7G3J3NeqZqp3LphI,21335
markdown/extensions/__init__.py,sha256=9z1khsdKCVrmrJ_2GfxtPAdjD3FyMe5vhC7wmM4O9m0,4822
markdown/extensions/__pycache__/__init__.cpython-311.pyc,,
markdown/extensions/__pycache__/abbr.cpython-311.pyc,,
markdown/extensions/__pycache__/admonition.cpython-311.pyc,,
markdown/extensions/__pycache__/attr_list.cpython-311.pyc,,
markdown/extensions/__pycache__/codehilite.cpython-311.pyc,,
markdown/extensions/__pycache__/def_list.cpython-311.pyc,,
markdown/extensions/__pycache__/extra.cpython-311.pyc,,
markdown/extensions/__pycache__/fenced_code.cpython-311.pyc,,
markdown/extensions/__pycache__/footnotes.cpython-311.pyc,,
markdown/extensions/__pycache__/legacy_attrs.cpython-311.pyc,,
markdown/extensions/__pycache__/legacy_em.cpython-311.pyc,,
markdown/extensions/__pycache__/md_in_html.cpython-311.pyc,,
markdown/extensions/__pycache__/meta.cpython-311.pyc,,
markdown/extensions/__pycache__/nl2br.cpython-311.pyc,,
markdown/extensions/__pycache__/sane_lists.cpython-311.pyc,,
markdown/extensions/__pycache__/smarty.cpython-311.pyc,,
markdown/extensions/__pycache__/tables.cpython-311.pyc,,
markdown/extensions/__pycache__/toc.cpython-311.pyc,,
markdown/extensions/__pycache__/wikilinks.cpython-311.pyc,,
markdown/extensions/abbr.py,sha256=JqFOfU7JlhIFY06-nZnSU0wDqneFKKWMe95eXB-iLtc,3250
markdown/extensions/admonition.py,sha256=Hqcn3I8JG0i-OPWdoqI189TmlQRgH6bs5PmpCANyLlg,6547
markdown/extensions/attr_list.py,sha256=t3PrgAr5Ebldnq3nJNbteBt79bN0ccXS5RemmQfUZ9g,7820
markdown/extensions/codehilite.py,sha256=ChlmpM6S--j-UK7t82859UpYjm8EftdiLqmgDnknyes,13503
markdown/extensions/def_list.py,sha256=J3NVa6CllfZPsboJCEycPyRhtjBHnOn8ET6omEvVlDo,4029
markdown/extensions/extra.py,sha256=1vleT284kued4HQBtF83IjSumJVo0q3ng6MjTkVNfNQ,2163
markdown/extensions/fenced_code.py,sha256=-fYSmRZ9DTYQ8HO9b_78i47kVyVu6mcVJlqVTMdzvo4,8300
markdown/extensions/footnotes.py,sha256=bRFlmIBOKDI5efG1jZfDkMoV2osfqWip1rN1j2P-mMg,16710
markdown/extensions/legacy_attrs.py,sha256=oWcyNrfP0F6zsBoBOaD5NiwrJyy4kCpgQLl12HA7JGU,2788
markdown/extensions/legacy_em.py,sha256=-Z_w4PEGSS-Xg-2-BtGAnXwwy5g5GDgv2tngASnPgxg,1693
markdown/extensions/md_in_html.py,sha256=y4HEWEnkvfih22fojcaJeAmjx1AtF8N-a_jb6IDFfts,16546
markdown/extensions/meta.py,sha256=v_4Uq7nbcQ76V1YAvqVPiNLbRLIQHJsnfsk-tN70RmY,2600
markdown/extensions/nl2br.py,sha256=9KKcrPs62c3ENNnmOJZs0rrXXqUtTCfd43j1_OPpmgU,1090
markdown/extensions/sane_lists.py,sha256=ogAKcm7gEpcXV7fSTf8JZH5YdKAssPCEOUzdGM3C9Tw,2150
markdown/extensions/smarty.py,sha256=yqT0OiE2AqYrqqZtcUFFmp2eJsQHomiKzgyG2JFb9rI,11048
markdown/extensions/tables.py,sha256=oTDvGD1qp9xjVWPGYNgDBWe9NqsX5gS6UU5wUsQ1bC8,8741
markdown/extensions/toc.py,sha256=PGg-EqbBubm3n0b633r8Xa9kc6JIdbo20HGAOZ6GEl8,18322
markdown/extensions/wikilinks.py,sha256=j7D2sozica6sqXOUa_GuAXqIzxp-7Hi60bfXymiuma8,3285
markdown/htmlparser.py,sha256=dEr6IE7i9b6Tc1gdCLZGeWw6g6-E-jK1Z4KPj8yGk8Q,14332
markdown/inlinepatterns.py,sha256=7_HF5nTOyQag_CyBgU4wwmuI6aMjtadvGadyS9IP21w,38256
markdown/postprocessors.py,sha256=eYi6eW0mGudmWpmsW45hduLwX66Zr8Bf44WyU9vKp-I,4807
markdown/preprocessors.py,sha256=pq5NnHKkOSVQeIo-ajC-Yt44kvyMV97D04FBOQXctJM,3224
markdown/serializers.py,sha256=YtAFYQoOdp_TAmYGow6nBo0eB6I-Sl4PTLdLDfQJHwQ,7174
markdown/test_tools.py,sha256=MtN4cf3ZPDtb83wXLTol-3q3aIGRIkJ2zWr6fd-RgVE,8662
markdown/treeprocessors.py,sha256=o4dnoZZsIeVV8qR45Njr8XgwKleWYDS5pv8dKQhJvv8,17651
markdown/util.py,sha256=vJ1E0xjMzDAlTqLUSJWgdEvxdQfLXDEYUssOQMw9kPQ,13929


@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.43.0)
Root-Is-Purelib: true
Tag: py3-none-any


@ -0,0 +1,22 @@
[console_scripts]
markdown_py = markdown.__main__:run
[markdown.extensions]
abbr = markdown.extensions.abbr:AbbrExtension
admonition = markdown.extensions.admonition:AdmonitionExtension
attr_list = markdown.extensions.attr_list:AttrListExtension
codehilite = markdown.extensions.codehilite:CodeHiliteExtension
def_list = markdown.extensions.def_list:DefListExtension
extra = markdown.extensions.extra:ExtraExtension
fenced_code = markdown.extensions.fenced_code:FencedCodeExtension
footnotes = markdown.extensions.footnotes:FootnoteExtension
legacy_attrs = markdown.extensions.legacy_attrs:LegacyAttrExtension
legacy_em = markdown.extensions.legacy_em:LegacyEmExtension
md_in_html = markdown.extensions.md_in_html:MarkdownInHtmlExtension
meta = markdown.extensions.meta:MetaExtension
nl2br = markdown.extensions.nl2br:Nl2BrExtension
sane_lists = markdown.extensions.sane_lists:SaneListExtension
smarty = markdown.extensions.smarty:SmartyExtension
tables = markdown.extensions.tables:TableExtension
toc = markdown.extensions.toc:TocExtension
wikilinks = markdown.extensions.wikilinks:WikiLinkExtension


@ -0,0 +1 @@
markdown


@ -0,0 +1 @@
pip


@ -0,0 +1,28 @@
Copyright 2010 Pallets
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -0,0 +1,93 @@
Metadata-Version: 2.1
Name: MarkupSafe
Version: 2.1.5
Summary: Safely add untrusted strings to HTML/XML markup.
Home-page: https://palletsprojects.com/p/markupsafe/
Maintainer: Pallets
Maintainer-email: contact@palletsprojects.com
License: BSD-3-Clause
Project-URL: Donate, https://palletsprojects.com/donate
Project-URL: Documentation, https://markupsafe.palletsprojects.com/
Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/
Project-URL: Source Code, https://github.com/pallets/markupsafe/
Project-URL: Issue Tracker, https://github.com/pallets/markupsafe/issues/
Project-URL: Chat, https://discord.gg/pallets
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Text Processing :: Markup :: HTML
Requires-Python: >=3.7
Description-Content-Type: text/x-rst
License-File: LICENSE.rst
MarkupSafe
==========
MarkupSafe implements a text object that escapes characters so it is
safe to use in HTML and XML. Characters that have special meanings are
replaced so that they display as the actual characters. This mitigates
injection attacks, meaning untrusted user input can safely be displayed
on a page.
Installing
----------
Install and update using `pip`_:
.. code-block:: text
pip install -U MarkupSafe
.. _pip: https://pip.pypa.io/en/stable/getting-started/
Examples
--------
.. code-block:: pycon
>>> from markupsafe import Markup, escape
>>> # escape replaces special characters and wraps in Markup
>>> escape("<script>alert(document.cookie);</script>")
Markup('&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
>>> # wrap in Markup to mark text "safe" and prevent escaping
>>> Markup("<strong>Hello</strong>")
Markup('<strong>Hello</strong>')
>>> escape(Markup("<strong>Hello</strong>"))
Markup('<strong>Hello</strong>')
>>> # Markup is a str subclass
>>> # methods and operators escape their arguments
>>> template = Markup("Hello <em>{name}</em>")
>>> template.format(name='"World"')
Markup('Hello <em>&#34;World&#34;</em>')
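As a brief additional sketch (not part of the upstream README), the ``%`` operator on a ``Markup`` instance escapes its right-hand operand in the same way:

.. code-block:: pycon

    >>> Markup("<em>%s</em>") % '"quoted"'
    Markup('<em>&#34;quoted&#34;</em>')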
Donate
------
The Pallets organization develops and supports MarkupSafe and other
popular packages. In order to grow the community of contributors and
users, and allow the maintainers to devote more time to the projects,
`please donate today`_.
.. _please donate today: https://palletsprojects.com/donate
Links
-----
- Documentation: https://markupsafe.palletsprojects.com/
- Changes: https://markupsafe.palletsprojects.com/changes/
- PyPI Releases: https://pypi.org/project/MarkupSafe/
- Source Code: https://github.com/pallets/markupsafe/
- Issue Tracker: https://github.com/pallets/markupsafe/issues/
- Chat: https://discord.gg/pallets


@ -0,0 +1,14 @@
MarkupSafe-2.1.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
MarkupSafe-2.1.5.dist-info/LICENSE.rst,sha256=RjHsDbX9kKVH4zaBcmTGeYIUM4FG-KyUtKV_lu6MnsQ,1503
MarkupSafe-2.1.5.dist-info/METADATA,sha256=icNlaniV7YIQZ1BScCVqNaRtm7MAgfw8d3OBmoSVyAY,3096
MarkupSafe-2.1.5.dist-info/RECORD,,
MarkupSafe-2.1.5.dist-info/WHEEL,sha256=ircjsfhzblqgSzO8ow7-0pXK-RVqDqNRGQ8F650AUNM,102
MarkupSafe-2.1.5.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
markupsafe/__init__.py,sha256=m1ysNeqf55zbEoJtaovca40ivrkEFolPlw5bGoC5Gi4,11290
markupsafe/__pycache__/__init__.cpython-311.pyc,,
markupsafe/__pycache__/_native.cpython-311.pyc,,
markupsafe/_native.py,sha256=_Q7UsXCOvgdonCgqG3l5asANI6eo50EKnDM-mlwEC5M,1776
markupsafe/_speedups.c,sha256=n3jzzaJwXcoN8nTFyA53f3vSqsWK2vujI-v6QYifjhQ,7403
markupsafe/_speedups.cp311-win_amd64.pyd,sha256=MEqnkyBOHmstwQr50hKitovHjrHhMJ0gYmya4Fu1DK0,15872
markupsafe/_speedups.pyi,sha256=f5QtwIOP0eLrxh2v5p6SmaYmlcHIGIfmz0DovaqL0OU,238
markupsafe/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0


@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.42.0)
Root-Is-Purelib: false
Tag: cp311-cp311-win_amd64


@ -0,0 +1 @@
markupsafe

Binary file not shown.


@ -0,0 +1,222 @@
# don't import any costly modules
import sys
import os
is_pypy = '__pypy__' in sys.builtin_module_names
def warn_distutils_present():
if 'distutils' not in sys.modules:
return
if is_pypy and sys.version_info < (3, 7):
# PyPy for 3.6 unconditionally imports distutils, so bypass the warning
# https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
return
import warnings
warnings.warn(
"Distutils was imported before Setuptools, but importing Setuptools "
"also replaces the `distutils` module in `sys.modules`. This may lead "
"to undesirable behaviors or errors. To avoid these issues, avoid "
"using distutils directly, ensure that setuptools is installed in the "
"traditional way (e.g. not an editable install), and/or make sure "
"that setuptools is always imported before distutils."
)
def clear_distutils():
if 'distutils' not in sys.modules:
return
import warnings
warnings.warn("Setuptools is replacing distutils.")
mods = [
name
for name in sys.modules
if name == "distutils" or name.startswith("distutils.")
]
for name in mods:
del sys.modules[name]
def enabled():
"""
Allow selection of distutils by environment variable.
"""
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
return which == 'local'
def ensure_local_distutils():
import importlib
clear_distutils()
# With the DistutilsMetaFinder in place,
# perform an import to cause distutils to be
# loaded from setuptools._distutils. Ref #2906.
with shim():
importlib.import_module('distutils')
# check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
assert 'setuptools._distutils.log' not in sys.modules
def do_override():
"""
Ensure that the local copy of distutils is preferred over stdlib.
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
for more motivation.
"""
if enabled():
warn_distutils_present()
ensure_local_distutils()
class _TrivialRe:
def __init__(self, *patterns):
self._patterns = patterns
def match(self, string):
return all(pat in string for pat in self._patterns)
class DistutilsMetaFinder:
def find_spec(self, fullname, path, target=None):
# optimization: only consider top level modules and those
# found in the CPython test suite.
if path is not None and not fullname.startswith('test.'):
return
method_name = 'spec_for_{fullname}'.format(**locals())
method = getattr(self, method_name, lambda: None)
return method()
def spec_for_distutils(self):
if self.is_cpython():
return
import importlib
import importlib.abc
import importlib.util
try:
mod = importlib.import_module('setuptools._distutils')
except Exception:
# There are a couple of cases where setuptools._distutils
# may not be present:
# - An older Setuptools without a local distutils is
# taking precedence. Ref #2957.
# - Path manipulation during sitecustomize removes
# setuptools from the path but only after the hook
# has been loaded. Ref #2980.
# In either case, fall back to stdlib behavior.
return
class DistutilsLoader(importlib.abc.Loader):
def create_module(self, spec):
mod.__name__ = 'distutils'
return mod
def exec_module(self, module):
pass
return importlib.util.spec_from_loader(
'distutils', DistutilsLoader(), origin=mod.__file__
)
@staticmethod
def is_cpython():
"""
Suppress supplying distutils for CPython (build and tests).
Ref #2965 and #3007.
"""
return os.path.isfile('pybuilddir.txt')
def spec_for_pip(self):
"""
Ensure stdlib distutils when running under pip.
See pypa/pip#8761 for rationale.
"""
if self.pip_imported_during_build():
return
clear_distutils()
self.spec_for_distutils = lambda: None
@classmethod
def pip_imported_during_build(cls):
"""
Detect if pip is being imported in a build script. Ref #2355.
"""
import traceback
return any(
cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
)
@staticmethod
def frame_file_is_setup(frame):
"""
Return True if the indicated frame suggests a setup.py file.
"""
# some frames may not have __file__ (#2940)
return frame.f_globals.get('__file__', '').endswith('setup.py')
def spec_for_sensitive_tests(self):
"""
Ensure stdlib distutils when running select tests under CPython.
python/cpython#91169
"""
clear_distutils()
self.spec_for_distutils = lambda: None
sensitive_tests = (
[
'test.test_distutils',
'test.test_peg_generator',
'test.test_importlib',
]
if sys.version_info < (3, 10)
else [
'test.test_distutils',
]
)
for name in DistutilsMetaFinder.sensitive_tests:
setattr(
DistutilsMetaFinder,
f'spec_for_{name}',
DistutilsMetaFinder.spec_for_sensitive_tests,
)
DISTUTILS_FINDER = DistutilsMetaFinder()
def add_shim():
DISTUTILS_FINDER in sys.meta_path or insert_shim()
class shim:
def __enter__(self):
insert_shim()
def __exit__(self, exc, value, tb):
remove_shim()
def insert_shim():
sys.meta_path.insert(0, DISTUTILS_FINDER)
def remove_shim():
try:
sys.meta_path.remove(DISTUTILS_FINDER)
except ValueError:
pass
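A minimal usage sketch for the module above: when the documented SETUPTOOLS_USE_DISTUTILS switch selects the stdlib copy, do_override() becomes a no-op (the environment value shown is the documented one; the rest is illustrative).

```python
import os

# choose the stdlib distutils instead of setuptools' bundled copy
os.environ["SETUPTOOLS_USE_DISTUTILS"] = "stdlib"

import _distutils_hack
_distutils_hack.do_override()  # enabled() is False, so nothing is patched
```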


@ -0,0 +1 @@
__import__('_distutils_hack').do_override()


@ -0,0 +1 @@
import _virtualenv


@ -0,0 +1,130 @@
"""Patches that are applied at runtime to the virtual environment"""
# -*- coding: utf-8 -*-
import os
import sys
VIRTUALENV_PATCH_FILE = os.path.join(__file__)
def patch_dist(dist):
"""
Distutils allows a user to configure some arguments via a configuration file:
https://docs.python.org/3/install/index.html#distutils-configuration-files
Some of these arguments, though, don't make sense in the context of a virtual environment, so let's fix them up.
"""
# we cannot allow some install config as that would get packages installed outside of the virtual environment
old_parse_config_files = dist.Distribution.parse_config_files
def parse_config_files(self, *args, **kwargs):
result = old_parse_config_files(self, *args, **kwargs)
install = self.get_option_dict("install")
if "prefix" in install: # the prefix governs where to install the libraries
install["prefix"] = VIRTUALENV_PATCH_FILE, os.path.abspath(sys.prefix)
for base in ("purelib", "platlib", "headers", "scripts", "data"):
key = "install_{}".format(base)
if key in install: # do not allow global configs to hijack venv paths
install.pop(key, None)
return result
dist.Distribution.parse_config_files = parse_config_files
# Import hook that patches some modules to ignore configuration values that break package installation in case
# of virtual environments.
_DISTUTILS_PATCH = "distutils.dist", "setuptools.dist"
if sys.version_info > (3, 4):
# https://docs.python.org/3/library/importlib.html#setting-up-an-importer
class _Finder:
"""A meta path finder that allows patching the imported distutils modules"""
fullname = None
# lock[0] is threading.Lock(), but initialized lazily to avoid importing threading very early at startup,
# because there are gevent-based applications that need to be first to import threading by themselves.
# See https://github.com/pypa/virtualenv/issues/1895 for details.
lock = []
def find_spec(self, fullname, path, target=None): # noqa: U100
if fullname in _DISTUTILS_PATCH and self.fullname is None:
# initialize lock[0] lazily
if len(self.lock) == 0:
import threading
lock = threading.Lock()
# there is possibility that two threads T1 and T2 are simultaneously running into find_spec,
# observing .lock as empty, and further going into hereby initialization. However due to the GIL,
# list.append() operation is atomic and this way only one of the threads will "win" to put the lock
# - that every thread will use - into .lock[0].
# https://docs.python.org/3/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
self.lock.append(lock)
from functools import partial
from importlib.util import find_spec
with self.lock[0]:
self.fullname = fullname
try:
spec = find_spec(fullname, path)
if spec is not None:
# https://www.python.org/dev/peps/pep-0451/#how-loading-will-work
is_new_api = hasattr(spec.loader, "exec_module")
func_name = "exec_module" if is_new_api else "load_module"
old = getattr(spec.loader, func_name)
func = self.exec_module if is_new_api else self.load_module
if old is not func:
try:
setattr(spec.loader, func_name, partial(func, old))
except AttributeError:
pass # C-Extension loaders are r/o such as zipimporter with <python 3.7
return spec
finally:
self.fullname = None
@staticmethod
def exec_module(old, module):
old(module)
if module.__name__ in _DISTUTILS_PATCH:
patch_dist(module)
@staticmethod
def load_module(old, name):
module = old(name)
if module.__name__ in _DISTUTILS_PATCH:
patch_dist(module)
return module
sys.meta_path.insert(0, _Finder())
else:
# https://www.python.org/dev/peps/pep-0302/
from imp import find_module
from pkgutil import ImpImporter, ImpLoader
class _VirtualenvImporter(object, ImpImporter):
def __init__(self, path=None):
object.__init__(self)
ImpImporter.__init__(self, path)
def find_module(self, fullname, path=None):
if fullname in _DISTUTILS_PATCH:
try:
return _VirtualenvLoader(fullname, *find_module(fullname.split(".")[-1], path))
except ImportError:
pass
return None
class _VirtualenvLoader(object, ImpLoader):
def __init__(self, fullname, file, filename, etc):
object.__init__(self)
ImpLoader.__init__(self, fullname, file, filename, etc)
def load_module(self, fullname):
module = super(_VirtualenvLoader, self).load_module(fullname)
patch_dist(module)
module.__loader__ = None # distlib fallback
return module
sys.meta_path.append(_VirtualenvImporter())
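For reference, a minimal self-contained sketch of the same meta-path-finder pattern used above (the finder name and printout are illustrative, not part of virtualenv):

```python
import sys
from importlib.abc import MetaPathFinder


class LoggingFinder(MetaPathFinder):
    """Observes targeted imports, then defers to the regular import machinery."""

    def find_spec(self, fullname, path, target=None):
        if fullname in ("distutils.dist", "setuptools.dist"):
            print(f"import observed: {fullname}")
        return None  # returning None lets the next finder handle the import


sys.meta_path.insert(0, LoggingFinder())
```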


@ -0,0 +1,13 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

Binary file not shown.


@ -0,0 +1,480 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic entry point for Abseil Python applications.
To use this module, define a ``main`` function with a single ``argv`` argument
and call ``app.run(main)``. For example::
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if __name__ == '__main__':
app.run(main)
"""
import collections
import errno
import os
import pdb
import sys
import textwrap
import traceback
from absl import command_name
from absl import flags
from absl import logging
try:
import faulthandler
except ImportError:
faulthandler = None
FLAGS = flags.FLAGS
flags.DEFINE_boolean('run_with_pdb', False, 'Set to true for PDB debug mode')
flags.DEFINE_boolean('pdb_post_mortem', False,
'Set to true to handle uncaught exceptions with PDB '
'post mortem.')
flags.DEFINE_alias('pdb', 'pdb_post_mortem')
flags.DEFINE_boolean('run_with_profiling', False,
'Set to true for profiling the script. '
'Execution will be slower, and the output format might '
'change over time.')
flags.DEFINE_string('profile_file', None,
'Dump profile information to a file (for python -m '
'pstats). Implies --run_with_profiling.')
flags.DEFINE_boolean('use_cprofile_for_profiling', True,
'Use cProfile instead of the profile module for '
'profiling. This has no effect unless '
'--run_with_profiling is set.')
flags.DEFINE_boolean('only_check_args', False,
'Set to true to validate args and exit.',
allow_hide_cpp=True)
# If main() exits via an abnormal exception, call into these
# handlers before exiting.
EXCEPTION_HANDLERS = []
class Error(Exception):
pass
class UsageError(Error):
"""Exception raised when the arguments supplied by the user are invalid.
Raise this when the arguments supplied are invalid from the point of
view of the application. For example when two mutually exclusive
flags have been supplied or when there are not enough non-flag
arguments. It is distinct from flags.Error which covers the lower
level of parsing and validating individual flags.
"""
def __init__(self, message, exitcode=1):
super(UsageError, self).__init__(message)
self.exitcode = exitcode
class HelpFlag(flags.BooleanFlag):
"""Special boolean flag that displays usage and raises SystemExit."""
NAME = 'help'
SHORT_NAME = '?'
def __init__(self):
super(HelpFlag, self).__init__(
self.NAME, False, 'show this help',
short_name=self.SHORT_NAME, allow_hide_cpp=True)
def parse(self, arg):
if self._parse(arg):
usage(shorthelp=True, writeto_stdout=True)
# Advertise --helpfull on stdout, since usage() was on stdout.
print()
print('Try --helpfull to get a list of all flags.')
sys.exit(1)
class HelpshortFlag(HelpFlag):
"""--helpshort is an alias for --help."""
NAME = 'helpshort'
SHORT_NAME = None
class HelpfullFlag(flags.BooleanFlag):
"""Display help for flags in the main module and all dependent modules."""
def __init__(self):
super(HelpfullFlag, self).__init__(
'helpfull', False, 'show full help', allow_hide_cpp=True)
def parse(self, arg):
if self._parse(arg):
usage(writeto_stdout=True)
sys.exit(1)
class HelpXMLFlag(flags.BooleanFlag):
"""Similar to HelpfullFlag, but generates output in XML format."""
def __init__(self):
super(HelpXMLFlag, self).__init__(
'helpxml', False, 'like --helpfull, but generates XML output',
allow_hide_cpp=True)
def parse(self, arg):
if self._parse(arg):
flags.FLAGS.write_help_in_xml_format(sys.stdout)
sys.exit(1)
def parse_flags_with_usage(args):
"""Tries to parse the flags, print usage, and exit if unparsable.
Args:
args: [str], a non-empty list of the command line arguments including
program name.
Returns:
[str], a non-empty list of remaining command line arguments after parsing
flags, including program name.
"""
try:
return FLAGS(args)
except flags.Error as error:
message = str(error)
if '\n' in message:
final_message = 'FATAL Flags parsing error:\n%s\n' % textwrap.indent(
message, ' ')
else:
final_message = 'FATAL Flags parsing error: %s\n' % message
sys.stderr.write(final_message)
sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\n')
sys.exit(1)
_define_help_flags_called = False
def define_help_flags():
"""Registers help flags. Idempotent."""
# Use a global to ensure idempotence.
global _define_help_flags_called
if not _define_help_flags_called:
flags.DEFINE_flag(HelpFlag())
flags.DEFINE_flag(HelpshortFlag()) # alias for --help
flags.DEFINE_flag(HelpfullFlag())
flags.DEFINE_flag(HelpXMLFlag())
_define_help_flags_called = True
def _register_and_parse_flags_with_usage(
argv=None,
flags_parser=parse_flags_with_usage,
):
"""Registers help flags, parses arguments and shows usage if appropriate.
This also calls sys.exit(0) if flag --only_check_args is True.
Args:
argv: [str], a non-empty list of the command line arguments including
program name, sys.argv is used if None.
flags_parser: Callable[[List[Text]], Any], the function used to parse flags.
The return value of this function is passed to `main` untouched.
It must guarantee FLAGS is parsed after this function is called.
Returns:
The return value of `flags_parser`. When using the default `flags_parser`,
it returns the following:
[str], a non-empty list of remaining command line arguments after parsing
flags, including program name.
Raises:
Error: Raised when flags_parser is called, but FLAGS is not parsed.
SystemError: Raised when it's called more than once.
"""
if _register_and_parse_flags_with_usage.done:
raise SystemError('Flag registration can be done only once.')
define_help_flags()
original_argv = sys.argv if argv is None else argv
args_to_main = flags_parser(original_argv)
if not FLAGS.is_parsed():
raise Error('FLAGS must be parsed after flags_parser is called.')
# Exit when told so.
if FLAGS.only_check_args:
sys.exit(0)
# Immediately after flags are parsed, bump verbosity to INFO if the flag has
# not been set.
if FLAGS['verbosity'].using_default_value:
FLAGS.verbosity = 0
_register_and_parse_flags_with_usage.done = True
return args_to_main
_register_and_parse_flags_with_usage.done = False
def _run_main(main, argv):
"""Calls main, optionally with pdb or profiler."""
if FLAGS.run_with_pdb:
sys.exit(pdb.runcall(main, argv))
elif FLAGS.run_with_profiling or FLAGS.profile_file:
# Avoid import overhead since most apps (including performance-sensitive
# ones) won't be run with profiling.
# pylint: disable=g-import-not-at-top
import atexit
if FLAGS.use_cprofile_for_profiling:
import cProfile as profile
else:
import profile
profiler = profile.Profile()
if FLAGS.profile_file:
atexit.register(profiler.dump_stats, FLAGS.profile_file)
else:
atexit.register(profiler.print_stats)
sys.exit(profiler.runcall(main, argv))
else:
sys.exit(main(argv))
def _call_exception_handlers(exception):
"""Calls any installed exception handlers."""
for handler in EXCEPTION_HANDLERS:
try:
if handler.wants(exception):
handler.handle(exception)
except: # pylint: disable=bare-except
try:
# We don't want to stop for exceptions in the exception handlers but
# we shouldn't hide them either.
logging.error(traceback.format_exc())
except: # pylint: disable=bare-except
# In case even the logging statement fails, ignore.
pass
def run(
main,
argv=None,
flags_parser=parse_flags_with_usage,
):
"""Begins executing the program.
Args:
main: The main function to execute. It takes a single argument "argv",
which is a list of command line arguments with parsed flags removed.
The return value is passed to `sys.exit`, and so for example
a return value of 0 or None results in a successful termination, whereas
a return value of 1 results in abnormal termination.
For more details, see https://docs.python.org/3/library/sys.html#sys.exit
argv: A non-empty list of the command line arguments including program name,
sys.argv is used if None.
flags_parser: Callable[[List[Text]], Any], the function used to parse flags.
The return value of this function is passed to `main` untouched.
It must guarantee FLAGS is parsed after this function is called.
Should be passed as a keyword-only arg which will become mandatory in a
future release.
- Parses command line flags with the flag module.
- If there are any errors, prints usage().
- Calls main() with the remaining arguments.
- If main() raises a UsageError, prints usage and the error message.
"""
try:
args = _run_init(
sys.argv if argv is None else argv,
flags_parser,
)
while _init_callbacks:
callback = _init_callbacks.popleft()
callback()
try:
_run_main(main, args)
except UsageError as error:
usage(shorthelp=True, detailed_error=error, exitcode=error.exitcode)
except:
exc = sys.exc_info()[1]
# Don't try to post-mortem debug successful SystemExits, since those
# mean there wasn't actually an error. In particular, the test framework
# raises SystemExit(False) even if all tests passed.
if isinstance(exc, SystemExit) and not exc.code:
raise
# Check the tty so that we don't hang waiting for input in an
# non-interactive scenario.
if FLAGS.pdb_post_mortem and sys.stdout.isatty():
traceback.print_exc()
print()
print(' *** Entering post-mortem debugging ***')
print()
pdb.post_mortem()
raise
except Exception as e:
_call_exception_handlers(e)
raise
# Callbacks which have been deferred until after _run_init has been called.
_init_callbacks = collections.deque()
def call_after_init(callback):
"""Calls the given callback only once ABSL has finished initialization.
If ABSL has already finished initialization when ``call_after_init`` is
called then the callback is executed immediately, otherwise `callback` is
stored to be executed after ``app.run`` has finished initializing (aka. just
before the main function is called).
If called after ``app.run``, this is equivalent to calling ``callback()`` in
the caller thread. If called before ``app.run``, callbacks are run
sequentially (in an undefined order) in the same thread as ``app.run``.
Args:
callback: a callable to be called once ABSL has finished initialization.
This may be immediate if initialization has already finished. It
takes no arguments and returns nothing.
"""
if _run_init.done:
callback()
else:
_init_callbacks.append(callback)
def _run_init(
argv,
flags_parser,
):
"""Does one-time initialization and re-parses flags on rerun."""
if _run_init.done:
return flags_parser(argv)
command_name.make_process_name_useful()
# Set up absl logging handler.
logging.use_absl_handler()
args = _register_and_parse_flags_with_usage(
argv=argv,
flags_parser=flags_parser,
)
if faulthandler:
try:
faulthandler.enable()
except Exception: # pylint: disable=broad-except
# Some tests verify stderr output very closely, so don't print anything.
# Disabled faulthandler is a low-impact error.
pass
_run_init.done = True
return args
_run_init.done = False
def usage(shorthelp=False, writeto_stdout=False, detailed_error=None,
exitcode=None):
"""Writes __main__'s docstring to stderr with some help text.
Args:
shorthelp: bool, if True, prints only flags from the main module,
rather than all flags.
writeto_stdout: bool, if True, writes help message to stdout,
rather than to stderr.
detailed_error: str, additional detail about why usage info was presented.
exitcode: optional integer, if set, exits with this status code after
writing help.
"""
if writeto_stdout:
stdfile = sys.stdout
else:
stdfile = sys.stderr
doc = sys.modules['__main__'].__doc__
if not doc:
doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
doc = flags.text_wrap(doc, indent=' ', firstline_indent='')
else:
# Replace all '%s' with sys.argv[0], and all '%%' with '%'.
num_specifiers = doc.count('%') - 2 * doc.count('%%')
try:
doc %= (sys.argv[0],) * num_specifiers
except (OverflowError, TypeError, ValueError):
# Just display the docstring as-is.
pass
if shorthelp:
flag_str = FLAGS.main_module_help()
else:
flag_str = FLAGS.get_help()
try:
stdfile.write(doc)
if flag_str:
stdfile.write('\nflags:\n')
stdfile.write(flag_str)
stdfile.write('\n')
if detailed_error is not None:
stdfile.write('\n%s\n' % detailed_error)
except IOError as e:
# We avoid printing a huge backtrace if we get EPIPE, because
# "foo.par --help | less" is a frequent use case.
if e.errno != errno.EPIPE:
raise
if exitcode is not None:
sys.exit(exitcode)
class ExceptionHandler(object):
"""Base exception handler from which other may inherit."""
def wants(self, exc):
"""Returns whether this handler wants to handle the exception or not.
This base class returns True for all exceptions by default. Override in
subclass if it wants to be more selective.
Args:
exc: Exception, the current exception.
"""
del exc # Unused.
return True
def handle(self, exc):
"""Do something with the current exception.
Args:
exc: Exception, the current exception
This method must be overridden.
"""
raise NotImplementedError()
def install_exception_handler(handler):
"""Installs an exception handler.
Args:
handler: ExceptionHandler, the exception handler to install.
Raises:
TypeError: Raised when the handler was not of the correct type.
All installed exception handlers will be called if main() exits via
an abnormal exception, i.e. not one of SystemExit, KeyboardInterrupt,
FlagsError or UsageError.
"""
if not isinstance(handler, ExceptionHandler):
raise TypeError('handler of type %s does not inherit from ExceptionHandler'
% type(handler))
EXCEPTION_HANDLERS.append(handler)
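Putting the pieces above together, a minimal sketch of a complete absl application (the flag name and greeting are illustrative):

```python
from absl import app
from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_string("name", "world", "Who to greet.")


def main(argv):
    # argv holds the remaining non-flag arguments, including the program name
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")
    print(f"Hello, {FLAGS.name}!")


if __name__ == "__main__":
    app.run(main)
```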


@ -0,0 +1,99 @@
from typing import Any, Callable, Collection, Iterable, List, NoReturn, Optional, Text, TypeVar, Union, overload
from absl.flags import _flag
_MainArgs = TypeVar('_MainArgs')
_Exc = TypeVar('_Exc', bound=Exception)
class ExceptionHandler():
def wants(self, exc: _Exc) -> bool:
...
def handle(self, exc: _Exc):
...
EXCEPTION_HANDLERS: List[ExceptionHandler] = ...
class HelpFlag(_flag.BooleanFlag):
def __init__(self):
...
class HelpshortFlag(HelpFlag):
...
class HelpfullFlag(_flag.BooleanFlag):
def __init__(self):
...
class HelpXMLFlag(_flag.BooleanFlag):
def __init__(self):
...
def define_help_flags() -> None:
...
@overload
def usage(shorthelp: Union[bool, int] = ...,
writeto_stdout: Union[bool, int] = ...,
detailed_error: Optional[Any] = ...,
exitcode: None = ...) -> None:
...
@overload
def usage(shorthelp: Union[bool, int] = ...,
writeto_stdout: Union[bool, int] = ...,
detailed_error: Optional[Any] = ...,
exitcode: int = ...) -> NoReturn:
...
def install_exception_handler(handler: ExceptionHandler) -> None:
...
class Error(Exception):
...
class UsageError(Error):
exitcode: int
def parse_flags_with_usage(args: List[Text]) -> List[Text]:
...
def call_after_init(callback: Callable[[], Any]) -> None:
...
# Without the flag_parser argument, `main` should require a List[Text].
@overload
def run(
main: Callable[[List[Text]], Any],
argv: Optional[List[Text]] = ...,
) -> NoReturn:
...
@overload
def run(
main: Callable[[_MainArgs], Any],
argv: Optional[List[Text]] = ...,
*,
flags_parser: Callable[[List[Text]], _MainArgs],
) -> NoReturn:
...
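The second overload corresponds to passing a custom flags_parser. A hedged sketch of what such a parser can look like (it simply delegates to the default parse_flags_with_usage, which keeps the contract that FLAGS is parsed; all names below are illustrative):

```python
from absl import app


def verbose_parser(argv):
    remaining = app.parse_flags_with_usage(argv)
    print("non-flag arguments:", remaining[1:])
    return remaining


def main(args):
    print("main received:", args)


if __name__ == "__main__":
    app.run(main, flags_parser=verbose_parser)
```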


@ -0,0 +1,63 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tiny stand alone library to change the kernel process name on Linux."""
import os
import sys
# This library must be kept small and stand alone. It is used by small things
# that require no extension modules.
def make_process_name_useful():
"""Sets the process name to something better than 'python' if possible."""
set_kernel_process_name(os.path.basename(sys.argv[0]))
def set_kernel_process_name(name):
"""Changes the Kernel's /proc/self/status process name on Linux.
The kernel name is NOT what will be shown by the ps or top command.
It is a 15 character string stored in the kernel's process table that
is included in the kernel log when a process is OOM killed.
The first 15 bytes of name are used. Non-ASCII unicode is replaced with '?'.
Does nothing if /proc/self/comm cannot be written or prctl() fails.
Args:
name: bytes|unicode, the Linux kernel's command name to set.
"""
if not isinstance(name, bytes):
name = name.encode('ascii', 'replace')
try:
# This is preferred to using ctypes to try and call prctl() when possible.
with open('/proc/self/comm', 'wb') as proc_comm:
proc_comm.write(name[:15])
except EnvironmentError:
try:
import ctypes # pylint: disable=g-import-not-at-top
except ImportError:
return # No ctypes.
try:
libc = ctypes.CDLL('libc.so.6')
except EnvironmentError:
return # No libc.so.6.
pr_set_name = ctypes.c_ulong(15) # linux/prctl.h PR_SET_NAME value.
zero = ctypes.c_ulong(0)
try:
libc.prctl(pr_set_name, name, zero, zero, zero)
# Ignore the prctl return value. Nothing we can do if it errored.
except AttributeError:
return # No prctl.
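A short usage sketch (Linux-only; the process name below is illustrative):

```python
from absl import command_name

command_name.set_kernel_process_name("myworker")

# the kernel-visible name (at most 15 bytes) can be read back on Linux
with open("/proc/self/comm") as f:
    print(f.read().strip())
```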


@ -0,0 +1,225 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package is used to define and parse command line flags.
This package defines a *distributed* flag-definition policy: rather than
an application having to define all flags in or near main(), each Python
module defines flags that are useful to it. When one Python module
imports another, it gains access to the other's flags. (This is
implemented by having all modules share a common, global registry object
containing all the flag information.)
Flags are defined through the use of one of the DEFINE_xxx functions.
The specific function used determines how the flag is parsed, checked,
and optionally type-converted, when it's seen on the command line.
"""
import getopt
import os
import re
import sys
import types
import warnings
from absl.flags import _argument_parser
from absl.flags import _defines
from absl.flags import _exceptions
from absl.flags import _flag
from absl.flags import _flagvalues
from absl.flags import _helpers
from absl.flags import _validators
__all__ = (
'DEFINE',
'DEFINE_flag',
'DEFINE_string',
'DEFINE_boolean',
'DEFINE_bool',
'DEFINE_float',
'DEFINE_integer',
'DEFINE_enum',
'DEFINE_enum_class',
'DEFINE_list',
'DEFINE_spaceseplist',
'DEFINE_multi',
'DEFINE_multi_string',
'DEFINE_multi_integer',
'DEFINE_multi_float',
'DEFINE_multi_enum',
'DEFINE_multi_enum_class',
'DEFINE_alias',
# Flag validators.
'register_validator',
'validator',
'register_multi_flags_validator',
'multi_flags_validator',
'mark_flag_as_required',
'mark_flags_as_required',
'mark_flags_as_mutual_exclusive',
'mark_bool_flags_as_mutual_exclusive',
# Flag modifiers.
'set_default',
'override_value',
# Key flag related functions.
'declare_key_flag',
'adopt_module_key_flags',
'disclaim_key_flags',
# Module exceptions.
'Error',
'CantOpenFlagFileError',
'DuplicateFlagError',
'IllegalFlagValueError',
'UnrecognizedFlagError',
'UnparsedFlagAccessError',
'ValidationError',
'FlagNameConflictsWithMethodError',
# Public classes.
'Flag',
'BooleanFlag',
'EnumFlag',
'EnumClassFlag',
'MultiFlag',
'MultiEnumClassFlag',
'FlagHolder',
'FlagValues',
'ArgumentParser',
'BooleanParser',
'EnumParser',
'EnumClassParser',
'ArgumentSerializer',
'FloatParser',
'IntegerParser',
'BaseListParser',
'ListParser',
'ListSerializer',
'EnumClassListSerializer',
'CsvListSerializer',
'WhitespaceSeparatedListParser',
'EnumClassSerializer',
# Helper functions.
'get_help_width',
'text_wrap',
'flag_dict_to_args',
'doc_to_help',
# The global FlagValues instance.
'FLAGS',
)
# Initialize the FLAGS_MODULE as early as possible.
# It's only used by adopt_module_key_flags to take SPECIAL_FLAGS into account.
_helpers.FLAGS_MODULE = sys.modules[__name__]
# Add current module to disclaimed module ids.
_helpers.disclaim_module_ids.add(id(sys.modules[__name__]))
# DEFINE functions. They are explained in more details in the module doc string.
# pylint: disable=invalid-name
DEFINE = _defines.DEFINE
DEFINE_flag = _defines.DEFINE_flag
DEFINE_string = _defines.DEFINE_string
DEFINE_boolean = _defines.DEFINE_boolean
DEFINE_bool = DEFINE_boolean # Match C++ API.
DEFINE_float = _defines.DEFINE_float
DEFINE_integer = _defines.DEFINE_integer
DEFINE_enum = _defines.DEFINE_enum
DEFINE_enum_class = _defines.DEFINE_enum_class
DEFINE_list = _defines.DEFINE_list
DEFINE_spaceseplist = _defines.DEFINE_spaceseplist
DEFINE_multi = _defines.DEFINE_multi
DEFINE_multi_string = _defines.DEFINE_multi_string
DEFINE_multi_integer = _defines.DEFINE_multi_integer
DEFINE_multi_float = _defines.DEFINE_multi_float
DEFINE_multi_enum = _defines.DEFINE_multi_enum
DEFINE_multi_enum_class = _defines.DEFINE_multi_enum_class
DEFINE_alias = _defines.DEFINE_alias
# pylint: enable=invalid-name
# Flag validators.
register_validator = _validators.register_validator
validator = _validators.validator
register_multi_flags_validator = _validators.register_multi_flags_validator
multi_flags_validator = _validators.multi_flags_validator
mark_flag_as_required = _validators.mark_flag_as_required
mark_flags_as_required = _validators.mark_flags_as_required
mark_flags_as_mutual_exclusive = _validators.mark_flags_as_mutual_exclusive
mark_bool_flags_as_mutual_exclusive = _validators.mark_bool_flags_as_mutual_exclusive
# Flag modifiers.
set_default = _defines.set_default
override_value = _defines.override_value
# Key flag related functions.
declare_key_flag = _defines.declare_key_flag
adopt_module_key_flags = _defines.adopt_module_key_flags
disclaim_key_flags = _defines.disclaim_key_flags
# Module exceptions.
# pylint: disable=invalid-name
Error = _exceptions.Error
CantOpenFlagFileError = _exceptions.CantOpenFlagFileError
DuplicateFlagError = _exceptions.DuplicateFlagError
IllegalFlagValueError = _exceptions.IllegalFlagValueError
UnrecognizedFlagError = _exceptions.UnrecognizedFlagError
UnparsedFlagAccessError = _exceptions.UnparsedFlagAccessError
ValidationError = _exceptions.ValidationError
FlagNameConflictsWithMethodError = _exceptions.FlagNameConflictsWithMethodError
# Public classes.
Flag = _flag.Flag
BooleanFlag = _flag.BooleanFlag
EnumFlag = _flag.EnumFlag
EnumClassFlag = _flag.EnumClassFlag
MultiFlag = _flag.MultiFlag
MultiEnumClassFlag = _flag.MultiEnumClassFlag
FlagHolder = _flagvalues.FlagHolder
FlagValues = _flagvalues.FlagValues
ArgumentParser = _argument_parser.ArgumentParser
BooleanParser = _argument_parser.BooleanParser
EnumParser = _argument_parser.EnumParser
EnumClassParser = _argument_parser.EnumClassParser
ArgumentSerializer = _argument_parser.ArgumentSerializer
FloatParser = _argument_parser.FloatParser
IntegerParser = _argument_parser.IntegerParser
BaseListParser = _argument_parser.BaseListParser
ListParser = _argument_parser.ListParser
ListSerializer = _argument_parser.ListSerializer
EnumClassListSerializer = _argument_parser.EnumClassListSerializer
CsvListSerializer = _argument_parser.CsvListSerializer
WhitespaceSeparatedListParser = _argument_parser.WhitespaceSeparatedListParser
EnumClassSerializer = _argument_parser.EnumClassSerializer
# pylint: enable=invalid-name
# Helper functions.
get_help_width = _helpers.get_help_width
text_wrap = _helpers.text_wrap
flag_dict_to_args = _helpers.flag_dict_to_args
doc_to_help = _helpers.doc_to_help
# Special flags.
_helpers.SPECIAL_FLAGS = FlagValues()
DEFINE_string(
'flagfile', '',
'Insert flag definitions from the given file into the command line.',
_helpers.SPECIAL_FLAGS) # pytype: disable=wrong-arg-types
DEFINE_string('undefok', '',
'comma-separated list of flag names that it is okay to specify '
'on the command line even if the program does not define a flag '
'with that name. IMPORTANT: flags in this list that have '
'arguments MUST use the --flag=value format.',
_helpers.SPECIAL_FLAGS) # pytype: disable=wrong-arg-types
#: The global FlagValues instance.
FLAGS = _flagvalues.FLAGS
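A brief sketch of the exported validator helpers (the flag name and bounds are illustrative):

```python
from absl import flags

flags.DEFINE_integer("port", 8080, "Port to listen on.")
flags.register_validator(
    "port",
    lambda value: 1 <= value <= 65535,
    message="--port must be between 1 and 65535.",
)
```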


@ -0,0 +1,638 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains base classes used to parse and convert arguments.
Do NOT import this module directly. Import the flags package and use the
aliases defined at the package level instead.
"""
import collections
import csv
import enum
import io
import string
from typing import Generic, List, Iterable, Optional, Sequence, Text, Type, TypeVar, Union
from xml.dom import minidom
from absl.flags import _helpers
_T = TypeVar('_T')
_ET = TypeVar('_ET', bound=enum.Enum)
_N = TypeVar('_N', int, float)
def _is_integer_type(instance):
"""Returns True if instance is an integer, and not a bool."""
return (isinstance(instance, int) and
not isinstance(instance, bool))
class _ArgumentParserCache(type):
"""Metaclass used to cache and share argument parsers among flags."""
_instances = {}
def __call__(cls, *args, **kwargs):
"""Returns an instance of the argument parser cls.
This method overrides behavior of the __new__ methods in
all subclasses of ArgumentParser (inclusive). If an instance
for cls with the same set of arguments exists, this instance is
returned, otherwise a new instance is created.
If any keyword arguments are defined, or the values in args
are not hashable, this method always returns a new instance of
cls.
Args:
*args: Positional initializer arguments.
**kwargs: Initializer keyword arguments.
Returns:
An instance of cls, shared or new.
"""
if kwargs:
return type.__call__(cls, *args, **kwargs)
else:
instances = cls._instances
key = (cls,) + tuple(args)
try:
return instances[key]
except KeyError:
# No cache entry for key exists, create a new one.
return instances.setdefault(key, type.__call__(cls, *args))
except TypeError:
# An object in args cannot be hashed, always return
# a new instance.
return type.__call__(cls, *args)
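# A minimal sketch of the caching behavior described above, using the
# IntegerParser defined later in this module: identical positional arguments
# yield the shared cached instance, while keyword arguments always build a
# fresh parser.
#
#   p1 = IntegerParser(0, 10)
#   p2 = IntegerParser(0, 10)
#   assert p1 is p2                   # cached and shared between flags
#   p3 = IntegerParser(lower_bound=0)
#   p4 = IntegerParser(lower_bound=0)
#   assert p3 is not p4               # keyword arguments bypass the cache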
class ArgumentParser(Generic[_T], metaclass=_ArgumentParserCache):
"""Base class used to parse and convert arguments.
The :meth:`parse` method checks to make sure that the string argument is a
legal value and converts it to a native type. If the value cannot be
converted, it should throw a ``ValueError`` exception with a human
readable explanation of why the value is illegal.
Subclasses should also define a syntactic_help string which may be
presented to the user to describe the form of the legal values.
Argument parser classes must be stateless, since instances are cached
and shared between flags. Initializer arguments are allowed, but all
member variables must be derived from initializer arguments only.
"""
syntactic_help: Text = ''
def parse(self, argument: Text) -> Optional[_T]:
"""Parses the string argument and returns the native value.
By default it returns its argument unmodified.
Args:
argument: string argument passed in the commandline.
Raises:
ValueError: Raised when it fails to parse the argument.
TypeError: Raised when the argument has the wrong type.
Returns:
The parsed value in native type.
"""
if not isinstance(argument, str):
raise TypeError('flag value must be a string, found "{}"'.format(
type(argument)))
return argument
def flag_type(self) -> Text:
"""Returns a string representing the type of the flag."""
return 'string'
def _custom_xml_dom_elements(
self, doc: minidom.Document
) -> List[minidom.Element]:
"""Returns a list of minidom.Element to add additional flag information.
Args:
doc: minidom.Document, the DOM document it should create nodes from.
"""
del doc # Unused.
return []
class ArgumentSerializer(Generic[_T]):
"""Base class for generating string representations of a flag value."""
def serialize(self, value: _T) -> Text:
"""Returns a serialized string of the value."""
return str(value)
class NumericParser(ArgumentParser[_N]):
"""Parser of numeric values.
Parsed value may be bounded to a given upper and lower bound.
"""
lower_bound: Optional[_N]
upper_bound: Optional[_N]
def is_outside_bounds(self, val: _N) -> bool:
"""Returns whether the value is outside the bounds or not."""
return ((self.lower_bound is not None and val < self.lower_bound) or
(self.upper_bound is not None and val > self.upper_bound))
def parse(self, argument: Text) -> _N:
"""See base class."""
val = self.convert(argument)
if self.is_outside_bounds(val):
raise ValueError('%s is not %s' % (val, self.syntactic_help))
return val
def _custom_xml_dom_elements(
self, doc: minidom.Document
) -> List[minidom.Element]:
elements = []
if self.lower_bound is not None:
elements.append(_helpers.create_xml_dom_element(
doc, 'lower_bound', self.lower_bound))
if self.upper_bound is not None:
elements.append(_helpers.create_xml_dom_element(
doc, 'upper_bound', self.upper_bound))
return elements
def convert(self, argument: Text) -> _N:
"""Returns the correct numeric value of argument.
Subclasses must implement this method, and should raise TypeError if argument
is not a string or of the correct numeric type.
Args:
argument: string argument passed in the commandline, or the numeric type.
Raises:
TypeError: Raised when argument is not a string or the right numeric type.
ValueError: Raised when failed to convert argument to the numeric value.
"""
raise NotImplementedError
class FloatParser(NumericParser[float]):
"""Parser of floating point values.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = 'a'
number_name = 'number'
syntactic_help = ' '.join((number_article, number_name))
def __init__(
self,
lower_bound: Optional[float] = None,
upper_bound: Optional[float] = None,
) -> None:
super(FloatParser, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound is not None and upper_bound is not None:
sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound))
elif lower_bound == 0:
sh = 'a non-negative %s' % self.number_name
elif upper_bound == 0:
sh = 'a non-positive %s' % self.number_name
elif upper_bound is not None:
sh = '%s <= %s' % (self.number_name, upper_bound)
elif lower_bound is not None:
sh = '%s >= %s' % (self.number_name, lower_bound)
self.syntactic_help = sh
def convert(self, argument: Union[int, float, str]) -> float:
"""Returns the float value of argument."""
if (_is_integer_type(argument) or isinstance(argument, float) or
isinstance(argument, str)):
return float(argument)
else:
raise TypeError(
'Expect argument to be a string, int, or float, found {}'.format(
type(argument)))
def flag_type(self) -> Text:
"""See base class."""
return 'float'
class IntegerParser(NumericParser[int]):
"""Parser of an integer value.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = 'an'
number_name = 'integer'
syntactic_help = ' '.join((number_article, number_name))
def __init__(
self, lower_bound: Optional[int] = None, upper_bound: Optional[int] = None
) -> None:
super(IntegerParser, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound is not None and upper_bound is not None:
sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound))
elif lower_bound == 1:
sh = 'a positive %s' % self.number_name
elif upper_bound == -1:
sh = 'a negative %s' % self.number_name
elif lower_bound == 0:
sh = 'a non-negative %s' % self.number_name
elif upper_bound == 0:
sh = 'a non-positive %s' % self.number_name
elif upper_bound is not None:
sh = '%s <= %s' % (self.number_name, upper_bound)
elif lower_bound is not None:
sh = '%s >= %s' % (self.number_name, lower_bound)
self.syntactic_help = sh
def convert(self, argument: Union[int, Text]) -> int:
"""Returns the int value of argument."""
if _is_integer_type(argument):
return argument
elif isinstance(argument, str):
base = 10
if len(argument) > 2 and argument[0] == '0':
if argument[1] == 'o':
base = 8
elif argument[1] == 'x':
base = 16
return int(argument, base)
else:
raise TypeError('Expect argument to be a string or int, found {}'.format(
type(argument)))
def flag_type(self) -> Text:
"""See base class."""
return 'int'
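# A brief usage sketch of the bounded integer parsing above; the bounds and
# inputs are illustrative.
#
#   parser = IntegerParser(lower_bound=0, upper_bound=100)
#   parser.parse('42')    # -> 42
#   parser.parse('0x2a')  # -> 42, the '0x' prefix selects base 16
#   parser.parse('101')   # raises ValueError: '101 is not an integer in the range [0, 100]'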
class BooleanParser(ArgumentParser[bool]):
"""Parser of boolean values."""
def parse(self, argument: Union[Text, int]) -> bool:
"""See base class."""
if isinstance(argument, str):
if argument.lower() in ('true', 't', '1'):
return True
elif argument.lower() in ('false', 'f', '0'):
return False
else:
raise ValueError('Non-boolean argument to boolean flag', argument)
elif isinstance(argument, int):
# Only allow bool or integer 0, 1.
# Note that float 1.0 == True, 0.0 == False.
bool_value = bool(argument)
if argument == bool_value:
return bool_value
else:
raise ValueError('Non-boolean argument to boolean flag', argument)
raise TypeError('Non-boolean argument to boolean flag', argument)
def flag_type(self) -> Text:
"""See base class."""
return 'bool'
class EnumParser(ArgumentParser[Text]):
"""Parser of a string enum value (a string value from a given set)."""
def __init__(
self, enum_values: Iterable[Text], case_sensitive: bool = True
) -> None:
"""Initializes EnumParser.
Args:
enum_values: [str], a non-empty list of string values in the enum.
case_sensitive: bool, whether or not the enum is to be case-sensitive.
Raises:
ValueError: When enum_values is empty.
"""
if not enum_values:
raise ValueError(
'enum_values cannot be empty, found "{}"'.format(enum_values))
if isinstance(enum_values, str):
raise ValueError(
'enum_values cannot be a str, found "{}"'.format(enum_values)
)
super(EnumParser, self).__init__()
self.enum_values = list(enum_values)
self.case_sensitive = case_sensitive
def parse(self, argument: Text) -> Text:
"""Determines validity of argument and returns the correct element of enum.
Args:
argument: str, the supplied flag value.
Returns:
The first matching element from enum_values.
Raises:
ValueError: Raised when argument didn't match anything in enum.
"""
if self.case_sensitive:
if argument not in self.enum_values:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return argument
else:
if argument.upper() not in [value.upper() for value in self.enum_values]:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return [value for value in self.enum_values
if value.upper() == argument.upper()][0]
def flag_type(self) -> Text:
"""See base class."""
return 'string enum'
class EnumClassParser(ArgumentParser[_ET]):
"""Parser of an Enum class member."""
def __init__(
self, enum_class: Type[_ET], case_sensitive: bool = True
) -> None:
"""Initializes EnumParser.
Args:
enum_class: class, the Enum class with all possible flag values.
case_sensitive: bool, whether or not the enum is to be case-sensitive. If
False, all member names must be unique when case is ignored.
Raises:
TypeError: When enum_class is not a subclass of Enum.
ValueError: When enum_class is empty.
"""
if not issubclass(enum_class, enum.Enum):
raise TypeError('{} is not a subclass of Enum.'.format(enum_class))
if not enum_class.__members__:
raise ValueError('enum_class cannot be empty, but "{}" is empty.'
.format(enum_class))
if not case_sensitive:
members = collections.Counter(
name.lower() for name in enum_class.__members__)
duplicate_keys = {
member for member, count in members.items() if count > 1
}
if duplicate_keys:
raise ValueError(
'Duplicate enum values for {} using case_sensitive=False'.format(
duplicate_keys))
super(EnumClassParser, self).__init__()
self.enum_class = enum_class
self._case_sensitive = case_sensitive
if case_sensitive:
self._member_names = tuple(enum_class.__members__)
else:
self._member_names = tuple(
name.lower() for name in enum_class.__members__)
@property
def member_names(self) -> Sequence[Text]:
"""The accepted enum names, in lowercase if not case sensitive."""
return self._member_names
def parse(self, argument: Union[_ET, Text]) -> _ET:
"""Determines validity of argument and returns the correct element of enum.
Args:
argument: str or Enum class member, the supplied flag value.
Returns:
The first matching Enum class member in Enum class.
Raises:
ValueError: Raised when argument didn't match anything in enum.
"""
if isinstance(argument, self.enum_class):
return argument # pytype: disable=bad-return-type
elif not isinstance(argument, str):
raise ValueError(
'{} is not an enum member or a name of a member in {}'.format(
argument, self.enum_class))
key = EnumParser(
self._member_names, case_sensitive=self._case_sensitive).parse(argument)
if self._case_sensitive:
return self.enum_class[key]
else:
# If EnumParser.parse() returns a value, we're guaranteed to find it
# as a member of the class
return next(value for name, value in self.enum_class.__members__.items()
if name.lower() == key.lower())
def flag_type(self) -> Text:
"""See base class."""
return 'enum class'
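# A brief usage sketch of the enum-class parsing above; the Color enum is
# hypothetical.
#
#   class Color(enum.Enum):
#     RED = 1
#     GREEN = 2
#
#   parser = EnumClassParser(Color, case_sensitive=False)
#   parser.parse('red')        # -> Color.RED
#   parser.parse(Color.GREEN)  # -> Color.GREEN, members pass through unchanged
#   parser.parse('BLUE')       # raises ValueError: value should be one of <red|green>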
class ListSerializer(Generic[_T], ArgumentSerializer[List[_T]]):
def __init__(self, list_sep: Text) -> None:
self.list_sep = list_sep
def serialize(self, value: List[_T]) -> Text:
"""See base class."""
return self.list_sep.join([str(x) for x in value])
class EnumClassListSerializer(ListSerializer[_ET]):
"""A serializer for :class:`MultiEnumClass` flags.
This serializer simply joins the output of `EnumClassSerializer` using a
provided separator.
"""
def __init__(self, list_sep: Text, **kwargs) -> None:
"""Initializes EnumClassListSerializer.
Args:
list_sep: String to be used as a separator when serializing
**kwargs: Keyword arguments to the `EnumClassSerializer` used to serialize
individual values.
"""
super(EnumClassListSerializer, self).__init__(list_sep)
self._element_serializer = EnumClassSerializer(**kwargs)
def serialize(self, value: Union[_ET, List[_ET]]) -> Text:
"""See base class."""
if isinstance(value, list):
return self.list_sep.join(
self._element_serializer.serialize(x) for x in value)
else:
return self._element_serializer.serialize(value)
class CsvListSerializer(ListSerializer[Text]):
def serialize(self, value: List[Text]) -> Text:
"""Serializes a list as a CSV string or unicode."""
output = io.StringIO()
writer = csv.writer(output, delimiter=self.list_sep)
writer.writerow([str(x) for x in value])
serialized_value = output.getvalue().strip()
# We need the returned value to be pure ASCII or Unicode so that
# when the XML help is generated it is usefully encodable.
return str(serialized_value)
class EnumClassSerializer(ArgumentSerializer[_ET]):
"""Class for generating string representations of an enum class flag value."""
def __init__(self, lowercase: bool) -> None:
"""Initializes EnumClassSerializer.
Args:
lowercase: If True, enum member names are lowercased during serialization.
"""
self._lowercase = lowercase
def serialize(self, value: _ET) -> Text:
"""Returns a serialized string of the Enum class value."""
as_string = str(value.name)
return as_string.lower() if self._lowercase else as_string
class BaseListParser(ArgumentParser):
"""Base class for a parser of lists of strings.
To extend, inherit from this class; from the subclass ``__init__``, call::
super().__init__(token, name)
where token is a character used to tokenize, and name is a description
of the separator.
"""
def __init__(
self, token: Optional[Text] = None, name: Optional[Text] = None
) -> None:
assert name
super(BaseListParser, self).__init__()
self._token = token
self._name = name
self.syntactic_help = 'a %s separated list' % self._name
def parse(self, argument: Text) -> List[Text]:
"""See base class."""
if isinstance(argument, list):
return argument
elif not argument:
return []
else:
return [s.strip() for s in argument.split(self._token)]
def flag_type(self) -> Text:
"""See base class."""
return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
"""Parser for a comma-separated list of strings."""
def __init__(self) -> None:
super(ListParser, self).__init__(',', 'comma')
def parse(self, argument: Union[Text, List[Text]]) -> List[Text]:
"""Parses argument as comma-separated list of strings."""
if isinstance(argument, list):
return argument
elif not argument:
return []
else:
try:
return [s.strip() for s in list(csv.reader([argument], strict=True))[0]]
except csv.Error as e:
# Provide a helpful report for case like
# --listflag="$(printf 'hello,\nworld')"
# IOW, list flag values containing naked newlines. This error
# was previously "reported" by allowing csv.Error to
# propagate.
raise ValueError('Unable to parse the value %r as a %s: %s'
% (argument, self.flag_type(), e))
def _custom_xml_dom_elements(
self, doc: minidom.Document
) -> List[minidom.Element]:
elements = super(ListParser, self)._custom_xml_dom_elements(doc)
elements.append(_helpers.create_xml_dom_element(
doc, 'list_separator', repr(',')))
return elements
class WhitespaceSeparatedListParser(BaseListParser):
"""Parser for a whitespace-separated list of strings."""
def __init__(self, comma_compat: bool = False) -> None:
"""Initializer.
Args:
comma_compat: bool, whether to support comma as an additional separator.
If False then only whitespace is supported. This is intended only for
backwards compatibility with flags that used to be comma-separated.
"""
self._comma_compat = comma_compat
name = 'whitespace or comma' if self._comma_compat else 'whitespace'
super(WhitespaceSeparatedListParser, self).__init__(None, name)
def parse(self, argument: Union[Text, List[Text]]) -> List[Text]:
"""Parses argument as whitespace-separated list of strings.
It also parses argument as comma-separated list of strings if requested.
Args:
argument: string argument passed in the commandline.
Returns:
[str], the parsed flag value.
"""
if isinstance(argument, list):
return argument
elif not argument:
return []
else:
if self._comma_compat:
argument = argument.replace(',', ' ')
return argument.split()
def _custom_xml_dom_elements(
self, doc: minidom.Document
) -> List[minidom.Element]:
elements = super(WhitespaceSeparatedListParser, self
)._custom_xml_dom_elements(doc)
separators = list(string.whitespace)
if self._comma_compat:
separators.append(',')
separators.sort()
for sep_char in separators:
elements.append(_helpers.create_xml_dom_element(
doc, 'list_separator', repr(sep_char)))
return elements
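The list parsers and serializers above back the comma- and whitespace-separated flag types. A short standalone sketch of their behavior follows; the module's own docstring warns against importing it directly, so the direct import here is only for illustration.

from absl.flags import _argument_parser

comma = _argument_parser.ListParser()
comma.parse('a, b, c')                    # -> ['a', 'b', 'c']

ws = _argument_parser.WhitespaceSeparatedListParser(comma_compat=True)
ws.parse('a b,c')                         # -> ['a', 'b', 'c']

csv_serializer = _argument_parser.CsvListSerializer(',')
csv_serializer.serialize(['a', 'b'])      # -> 'a,b'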

File diff suppressed because it is too large

View File

@ -0,0 +1,108 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exception classes in ABSL flags library.
Do NOT import this module directly. Import the flags package and use the
aliases defined at the package level instead.
"""
import sys
from absl.flags import _helpers
_helpers.disclaim_module_ids.add(id(sys.modules[__name__]))
class Error(Exception):
"""The base class for all flags errors."""
class CantOpenFlagFileError(Error):
"""Raised when flagfile fails to open.
E.g. the file doesn't exist, or has wrong permissions.
"""
class DuplicateFlagError(Error):
"""Raised if there is a flag naming conflict."""
@classmethod
def from_flag(cls, flagname, flag_values, other_flag_values=None):
"""Creates a DuplicateFlagError by providing flag name and values.
Args:
flagname: str, the name of the flag being redefined.
flag_values: :class:`FlagValues`, the FlagValues instance containing the
first definition of flagname.
other_flag_values: :class:`FlagValues`, if it is not None, it should be
the FlagValues object where the second definition of flagname occurs.
If it is None, we assume that we're being called when attempting to
create the flag a second time, and we use the module calling this one
as the source of the second definition.
Returns:
An instance of DuplicateFlagError.
"""
first_module = flag_values.find_module_defining_flag(
flagname, default='<unknown>')
if other_flag_values is None:
second_module = _helpers.get_calling_module()
else:
second_module = other_flag_values.find_module_defining_flag(
flagname, default='<unknown>')
flag_summary = flag_values[flagname].help
msg = ("The flag '%s' is defined twice. First from %s, Second from %s. "
"Description from first occurrence: %s") % (
flagname, first_module, second_module, flag_summary)
return cls(msg)
class IllegalFlagValueError(Error):
"""Raised when the flag command line argument is illegal."""
class UnrecognizedFlagError(Error):
"""Raised when a flag is unrecognized.
Attributes:
flagname: str, the name of the unrecognized flag.
flagvalue: The value of the flag, empty if the flag is not defined.
"""
def __init__(self, flagname, flagvalue='', suggestions=None):
self.flagname = flagname
self.flagvalue = flagvalue
if suggestions:
# Space before the question mark is intentional to not include it in the
# selection when copy-pasting the suggestion from (some) terminals.
tip = '. Did you mean: %s ?' % ', '.join(suggestions)
else:
tip = ''
super(UnrecognizedFlagError, self).__init__(
'Unknown command line flag \'%s\'%s' % (flagname, tip))
class UnparsedFlagAccessError(Error):
"""Raised when accessing the flag value from unparsed :class:`FlagValues`."""
class ValidationError(Error):
"""Raised when flag validator constraint is not satisfied."""
class FlagNameConflictsWithMethodError(Error):
"""Raised when a flag name conflicts with :class:`FlagValues` methods."""

View File

@ -0,0 +1,556 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains Flag class - information about single command-line flag.
Do NOT import this module directly. Import the flags package and use the
aliases defined at the package level instead.
"""
from collections import abc
import copy
import enum
import functools
from typing import Any, Dict, Generic, Iterable, List, Optional, Text, Type, TypeVar, Union
from xml.dom import minidom
from absl.flags import _argument_parser
from absl.flags import _exceptions
from absl.flags import _helpers
_T = TypeVar('_T')
_ET = TypeVar('_ET', bound=enum.Enum)
@functools.total_ordering
class Flag(Generic[_T]):
"""Information about a command-line flag.
Attributes:
name: the name for this flag
default: the default value for this flag
default_unparsed: the unparsed default value for this flag.
default_as_str: default value as repr'd string, e.g., "'true'"
(or None)
value: the most recent parsed value of this flag set by :meth:`parse`
help: a help string or None if no help is available
short_name: the single letter alias for this flag (or None)
boolean: if 'true', this flag does not accept arguments
present: true if this flag was parsed from command line flags
parser: an :class:`~absl.flags.ArgumentParser` object
serializer: an ArgumentSerializer object
allow_override: the flag may be redefined without raising an error,
and newly defined flag overrides the old one.
allow_override_cpp: use the flag from C++ if available; the flag
definition is replaced by the C++ flag after init
allow_hide_cpp: use the Python flag despite having a C++ flag with
the same name (ignore the C++ flag)
using_default_value: the flag value has not been set by user
allow_overwrite: the flag may be parsed more than once without
raising an error, the last set value will be used
allow_using_method_names: whether this flag can be defined even if
it has a name that conflicts with a FlagValues method.
validators: list of the flag validators.
The only public method of a ``Flag`` object is :meth:`parse`, but it is
typically only called by a :class:`~absl.flags.FlagValues` object. The
:meth:`parse` method is a thin wrapper around the
:meth:`ArgumentParser.parse()<absl.flags.ArgumentParser.parse>` method. The
parsed value is saved in ``.value``, and the ``.present`` attribute is
updated. If this flag was already present, an Error is raised.
:meth:`parse` is also called during ``__init__`` to parse the default value
and initialize the ``.value`` attribute. This enables other python modules to
safely use flags even if the ``__main__`` module neglects to parse the
command line arguments. The ``.present`` attribute is cleared after
``__init__`` parsing. If the default value is set to ``None``, then the
``__init__`` parsing step is skipped and the ``.value`` attribute is
initialized to None.
Note: The default value is also presented to the user in the help
string, so it is important that it be a legal value for this flag.
"""
# NOTE: pytype doesn't find defaults without this.
default: Optional[_T]
default_as_str: Optional[Text]
default_unparsed: Union[Optional[_T], Text]
def __init__(
self,
parser: _argument_parser.ArgumentParser[_T],
serializer: Optional[_argument_parser.ArgumentSerializer[_T]],
name: Text,
default: Union[Optional[_T], Text],
help_string: Optional[Text],
short_name: Optional[Text] = None,
boolean: bool = False,
allow_override: bool = False,
allow_override_cpp: bool = False,
allow_hide_cpp: bool = False,
allow_overwrite: bool = True,
allow_using_method_names: bool = False,
) -> None:
self.name = name
if not help_string:
help_string = '(no help available)'
self.help = help_string
self.short_name = short_name
self.boolean = boolean
self.present = 0
self.parser = parser
self.serializer = serializer
self.allow_override = allow_override
self.allow_override_cpp = allow_override_cpp
self.allow_hide_cpp = allow_hide_cpp
self.allow_overwrite = allow_overwrite
self.allow_using_method_names = allow_using_method_names
self.using_default_value = True
self._value = None
self.validators = []
if self.allow_hide_cpp and self.allow_override_cpp:
raise _exceptions.Error(
"Can't have both allow_hide_cpp (means use Python flag) and "
'allow_override_cpp (means use C++ flag after InitGoogle)')
self._set_default(default)
@property
def value(self) -> Optional[_T]:
return self._value
@value.setter
def value(self, value: Optional[_T]):
self._value = value
def __hash__(self):
return hash(id(self))
def __eq__(self, other):
return self is other
def __lt__(self, other):
if isinstance(other, Flag):
return id(self) < id(other)
return NotImplemented
def __bool__(self):
raise TypeError('A Flag instance would always be True. '
'Did you mean to test the `.value` attribute?')
def __getstate__(self):
raise TypeError("can't pickle Flag objects")
def __copy__(self):
raise TypeError('%s does not support shallow copies. '
'Use copy.deepcopy instead.' % type(self).__name__)
def __deepcopy__(self, memo: Dict[int, Any]) -> 'Flag[_T]':
result = object.__new__(type(self))
result.__dict__ = copy.deepcopy(self.__dict__, memo)
return result
def _get_parsed_value_as_string(self, value: Optional[_T]) -> Optional[Text]:
"""Returns parsed flag value as string."""
if value is None:
return None
if self.serializer:
return repr(self.serializer.serialize(value))
if self.boolean:
if value:
return repr('true')
else:
return repr('false')
return repr(str(value))
def parse(self, argument: Union[Text, Optional[_T]]) -> None:
"""Parses string and sets flag value.
Args:
argument: str or the correct flag value type, argument to be parsed.
"""
if self.present and not self.allow_overwrite:
raise _exceptions.IllegalFlagValueError(
'flag --%s=%s: already defined as %s' % (
self.name, argument, self.value))
self.value = self._parse(argument)
self.present += 1
def _parse(self, argument: Union[Text, _T]) -> Optional[_T]:
"""Internal parse function.
It returns the parsed value, and does not modify class states.
Args:
argument: str or the correct flag value type, argument to be parsed.
Returns:
The parsed value.
"""
try:
return self.parser.parse(argument)
except (TypeError, ValueError) as e: # Recast as IllegalFlagValueError.
raise _exceptions.IllegalFlagValueError(
'flag --%s=%s: %s' % (self.name, argument, e))
def unparse(self) -> None:
self.value = self.default
self.using_default_value = True
self.present = 0
def serialize(self) -> Text:
"""Serializes the flag."""
return self._serialize(self.value)
def _serialize(self, value: Optional[_T]) -> Text:
"""Internal serialize function."""
if value is None:
return ''
if self.boolean:
if value:
return '--%s' % self.name
else:
return '--no%s' % self.name
else:
if not self.serializer:
raise _exceptions.Error(
'Serializer not present for flag %s' % self.name)
return '--%s=%s' % (self.name, self.serializer.serialize(value))
def _set_default(self, value: Union[Optional[_T], Text]) -> None:
"""Changes the default value (and current value too) for this Flag."""
self.default_unparsed = value
if value is None:
self.default = None
else:
self.default = self._parse_from_default(value)
self.default_as_str = self._get_parsed_value_as_string(self.default)
if self.using_default_value:
self.value = self.default
# This is split out so that aliases can skip regular parsing of the default
# value.
def _parse_from_default(self, value: Union[Text, _T]) -> Optional[_T]:
return self._parse(value)
def flag_type(self) -> Text:
"""Returns a str that describes the type of the flag.
NOTE: we use strings, and not the types.*Type constants because
our flags can have more exotic types, e.g., 'comma separated list
of strings', 'whitespace separated list of strings', etc.
"""
return self.parser.flag_type()
def _create_xml_dom_element(
self, doc: minidom.Document, module_name: str, is_key: bool = False
) -> minidom.Element:
"""Returns an XML element that contains this flag's information.
This is information that is relevant to all flags (e.g., name,
meaning, etc.). If you defined a flag that has some other pieces of
info, then please override _extra_xml_dom_elements.
Please do NOT override this method.
Args:
doc: minidom.Document, the DOM document it should create nodes from.
module_name: str, the name of the module that defines this flag.
is_key: boolean, True iff this flag is key for main module.
Returns:
A minidom.Element instance.
"""
element = doc.createElement('flag')
if is_key:
element.appendChild(_helpers.create_xml_dom_element(doc, 'key', 'yes'))
element.appendChild(_helpers.create_xml_dom_element(
doc, 'file', module_name))
# Adds flag features that are relevant for all flags.
element.appendChild(_helpers.create_xml_dom_element(doc, 'name', self.name))
if self.short_name:
element.appendChild(_helpers.create_xml_dom_element(
doc, 'short_name', self.short_name))
if self.help:
element.appendChild(_helpers.create_xml_dom_element(
doc, 'meaning', self.help))
# The default flag value can either be represented as a string like on the
# command line, or as a Python object. We serialize this value in the
# latter case in order to remain consistent.
if self.serializer and not isinstance(self.default, str):
if self.default is not None:
default_serialized = self.serializer.serialize(self.default)
else:
default_serialized = ''
else:
default_serialized = self.default
element.appendChild(_helpers.create_xml_dom_element(
doc, 'default', default_serialized))
value_serialized = self._serialize_value_for_xml(self.value)
element.appendChild(_helpers.create_xml_dom_element(
doc, 'current', value_serialized))
element.appendChild(_helpers.create_xml_dom_element(
doc, 'type', self.flag_type()))
# Adds extra flag features this flag may have.
for e in self._extra_xml_dom_elements(doc):
element.appendChild(e)
return element
def _serialize_value_for_xml(self, value: Optional[_T]) -> Any:
"""Returns the serialized value, for use in an XML help text."""
return value
def _extra_xml_dom_elements(
self, doc: minidom.Document
) -> List[minidom.Element]:
"""Returns extra info about this flag in XML.
"Extra" means "not already included by _create_xml_dom_element above."
Args:
doc: minidom.Document, the DOM document it should create nodes from.
Returns:
A list of minidom.Element.
"""
# Usually, the parser knows the extra details about the flag, so
# we just forward the call to it.
return self.parser._custom_xml_dom_elements(doc) # pylint: disable=protected-access
class BooleanFlag(Flag[bool]):
"""Basic boolean flag.
Boolean flags do not take any arguments, and their value is either
``True`` (1) or ``False`` (0). The false value is specified on the command
line by prepending the word ``'no'`` to either the long or the short flag
name.
For example, if a Boolean flag was created whose long name was
``'update'`` and whose short name was ``'x'``, then this flag could be
explicitly unset through either ``--noupdate`` or ``--nox``.
"""
def __init__(
self,
name: Text,
default: Union[Optional[bool], Text],
help: Optional[Text], # pylint: disable=redefined-builtin
short_name: Optional[Text] = None,
**args
) -> None:
p = _argument_parser.BooleanParser()
super(BooleanFlag, self).__init__(
p, None, name, default, help, short_name, True, **args
)
class EnumFlag(Flag[Text]):
"""Basic enum flag; its value can be any string from list of enum_values."""
def __init__(
self,
name: Text,
default: Optional[Text],
help: Optional[Text], # pylint: disable=redefined-builtin
enum_values: Iterable[Text],
short_name: Optional[Text] = None,
case_sensitive: bool = True,
**args
):
p = _argument_parser.EnumParser(enum_values, case_sensitive)
g = _argument_parser.ArgumentSerializer()
super(EnumFlag, self).__init__(
p, g, name, default, help, short_name, **args)
# NOTE: parser should be typed EnumParser but the constructor
# restricts the available interface to ArgumentParser[str].
self.parser = p
self.help = '<%s>: %s' % ('|'.join(p.enum_values), self.help)
def _extra_xml_dom_elements(
self, doc: minidom.Document
) -> List[minidom.Element]:
elements = []
for enum_value in self.parser.enum_values:
elements.append(_helpers.create_xml_dom_element(
doc, 'enum_value', enum_value))
return elements
class EnumClassFlag(Flag[_ET]):
"""Basic enum flag; its value is an enum class's member."""
def __init__(
self,
name: Text,
default: Union[Optional[_ET], Text],
help: Optional[Text], # pylint: disable=redefined-builtin
enum_class: Type[_ET],
short_name: Optional[Text] = None,
case_sensitive: bool = False,
**args
):
p = _argument_parser.EnumClassParser(
enum_class, case_sensitive=case_sensitive)
g = _argument_parser.EnumClassSerializer(lowercase=not case_sensitive)
super(EnumClassFlag, self).__init__(
p, g, name, default, help, short_name, **args)
# NOTE: parser should be typed EnumClassParser[_ET] but the constructor
# restricts the available interface to ArgumentParser[_ET].
self.parser = p
self.help = '<%s>: %s' % ('|'.join(p.member_names), self.help)
def _extra_xml_dom_elements(
self, doc: minidom.Document
) -> List[minidom.Element]:
elements = []
for enum_value in self.parser.enum_class.__members__.keys():
elements.append(_helpers.create_xml_dom_element(
doc, 'enum_value', enum_value))
return elements
class MultiFlag(Generic[_T], Flag[List[_T]]):
"""A flag that can appear multiple time on the command-line.
The value of such a flag is a list that contains the individual values
from all the appearances of that flag on the command-line.
See the __doc__ for Flag for most behavior of this class. Only
differences in behavior are described here:
* The default value may be either a single value or an iterable of values.
A single value is transformed into a single-item list of that value.
* The value of the flag is always a list, even if the option was
only supplied once, and even if the default value is a single
value
"""
def __init__(self, *args, **kwargs):
super(MultiFlag, self).__init__(*args, **kwargs)
self.help += ';\n repeat this option to specify a list of values'
def parse(self, arguments: Union[Text, _T, Iterable[_T]]): # pylint: disable=arguments-renamed
"""Parses one or more arguments with the installed parser.
Args:
arguments: a single argument or a list of arguments (typically a
list of default values); a single argument is converted
internally into a list containing one item.
"""
new_values = self._parse(arguments)
if self.present:
self.value.extend(new_values)
else:
self.value = new_values
self.present += len(new_values)
def _parse(self, arguments: Union[Text, Optional[Iterable[_T]]]) -> List[_T]: # pylint: disable=arguments-renamed
if (isinstance(arguments, abc.Iterable) and
not isinstance(arguments, str)):
arguments = list(arguments)
if not isinstance(arguments, list):
# Default value may be a list of values. Most other arguments
# will not be, so convert them into a single-item list to make
# processing simpler below.
arguments = [arguments]
return [super(MultiFlag, self)._parse(item) for item in arguments]
def _serialize(self, value: Optional[List[_T]]) -> Text:
"""See base class."""
if not self.serializer:
raise _exceptions.Error(
'Serializer not present for flag %s' % self.name)
if value is None:
return ''
serialized_items = [
super(MultiFlag, self)._serialize(value_item) for value_item in value
]
return '\n'.join(serialized_items)
def flag_type(self):
"""See base class."""
return 'multi ' + self.parser.flag_type()
def _extra_xml_dom_elements(
self, doc: minidom.Document
) -> List[minidom.Element]:
elements = []
if hasattr(self.parser, 'enum_values'):
for enum_value in self.parser.enum_values: # pytype: disable=attribute-error
elements.append(_helpers.create_xml_dom_element(
doc, 'enum_value', enum_value))
return elements
class MultiEnumClassFlag(MultiFlag[_ET]): # pytype: disable=not-indexable
"""A multi_enum_class flag.
See the __doc__ for MultiFlag for most behaviors of this class. In addition,
this class knows how to handle enum.Enum instances as values for this flag
type.
"""
def __init__(
self,
name: str,
default: Union[None, Iterable[_ET], _ET, Iterable[Text], Text],
help_string: str,
enum_class: Type[_ET],
case_sensitive: bool = False,
**args
):
p = _argument_parser.EnumClassParser(
enum_class, case_sensitive=case_sensitive)
g = _argument_parser.EnumClassListSerializer(
list_sep=',', lowercase=not case_sensitive)
super(MultiEnumClassFlag, self).__init__(
p, g, name, default, help_string, **args)
# NOTE: parser should be typed EnumClassParser[_ET] but the constructor
# restricts the available interface to ArgumentParser[str].
self.parser = p
# NOTE: serializer should be non-Optional but this isn't inferred.
self.serializer = g
self.help = (
'<%s>: %s;\n repeat this option to specify a list of values' %
('|'.join(p.member_names), help_string or '(no help available)'))
def _extra_xml_dom_elements(
self, doc: minidom.Document
) -> List[minidom.Element]:
elements = []
for enum_value in self.parser.enum_class.__members__.keys(): # pytype: disable=attribute-error
elements.append(_helpers.create_xml_dom_element(
doc, 'enum_value', enum_value))
return elements
def _serialize_value_for_xml(self, value):
"""See base class."""
if value is not None:
if not self.serializer:
raise _exceptions.Error(
'Serializer not present for flag %s' % self.name
)
value_serialized = self.serializer.serialize(value)
else:
value_serialized = ''
return value_serialized
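Flag objects are normally created through the public DEFINE wrappers rather than constructed directly. A short sketch of the boolean and multi-flag behavior described above follows; the flag names are illustrative.

from absl import flags

flags.DEFINE_boolean('verbose', False, 'Illustrative boolean flag.')
flags.DEFINE_multi_string('tag', [], 'Illustrative multi-string flag.')

FLAGS = flags.FLAGS
FLAGS(['prog', '--verbose', '--tag=a', '--tag=b'])
print(FLAGS.verbose)                 # -> True
print(FLAGS.tag)                     # -> ['a', 'b']
print(FLAGS['verbose'].serialize())  # -> '--verbose'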

File diff suppressed because it is too large

View File

@ -0,0 +1,421 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal helper functions for Abseil Python flags library."""
import os
import re
import struct
import sys
import textwrap
import types
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Sequence, Set
from xml.dom import minidom
# pylint: disable=g-import-not-at-top
try:
import fcntl
except ImportError:
fcntl = None
try:
# Importing termios will fail on non-unix platforms.
import termios
except ImportError:
termios = None
# pylint: enable=g-import-not-at-top
_DEFAULT_HELP_WIDTH = 80 # Default width of help output.
# Minimal "sane" width of help output. We assume that any value below 40 is
# unreasonable.
_MIN_HELP_WIDTH = 40
# Define the allowed error rate in an input string to get suggestions.
#
# We lean towards a high threshold because we tend to be matching a phrase,
# and the simple algorithm used here is geared towards correcting word
# spellings.
#
# For manual testing, consider "<command> --list" which produced a large number
# of spurious suggestions when we used "least_errors > 0.5" instead of
# "least_erros >= 0.5".
_SUGGESTION_ERROR_RATE_THRESHOLD = 0.50
# Characters that cannot appear or are highly discouraged in an XML 1.0
# document. (See http://www.w3.org/TR/REC-xml/#charsets or
# https://en.wikipedia.org/wiki/Valid_characters_in_XML#XML_1.0)
_ILLEGAL_XML_CHARS_REGEX = re.compile(
u'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x84\x86-\x9f\ud800-\udfff\ufffe\uffff]')
# This is a set of module ids for the modules that disclaim key flags.
# This module is explicitly added to this set so that we never consider it to
# define key flag.
disclaim_module_ids: Set[int] = set([id(sys.modules[__name__])])
# Define special flags here so that help may be generated for them.
# NOTE: Please do NOT use SPECIAL_FLAGS from outside flags module.
# Initialized inside flagvalues.py.
# NOTE: This cannot be annotated as its actual FlagValues type since this would
# create a circular dependency.
SPECIAL_FLAGS: Any = None
# This points to the flags module, initialized in flags/__init__.py.
# This should only be used in adopt_module_key_flags to take SPECIAL_FLAGS into
# account.
FLAGS_MODULE: types.ModuleType = None
class _ModuleObjectAndName(NamedTuple):
"""Module object and name.
Fields:
- module: object, module object.
- module_name: str, module name.
"""
module: types.ModuleType
module_name: str
def get_module_object_and_name(
globals_dict: Dict[str, Any]
) -> _ModuleObjectAndName:
"""Returns the module that defines a global environment, and its name.
Args:
globals_dict: A dictionary that should correspond to an environment
providing the values of the globals.
Returns:
_ModuleObjectAndName - pair of module object & module name.
Returns (None, None) if the module could not be identified.
"""
name = globals_dict.get('__name__', None)
module = sys.modules.get(name, None)
# Pick a more informative name for the main module.
return _ModuleObjectAndName(module,
(sys.argv[0] if name == '__main__' else name))
def get_calling_module_object_and_name() -> _ModuleObjectAndName:
"""Returns the module that's calling into this module.
We generally use this function to get the name of the module calling a
DEFINE_foo... function.
Returns:
The module object that called into this one.
Raises:
AssertionError: Raised when no calling module could be identified.
"""
for depth in range(1, sys.getrecursionlimit()):
# sys._getframe is the right thing to use here, as it's the best
# way to walk up the call stack.
globals_for_frame = sys._getframe(depth).f_globals # pylint: disable=protected-access
module, module_name = get_module_object_and_name(globals_for_frame)
if id(module) not in disclaim_module_ids and module_name is not None:
return _ModuleObjectAndName(module, module_name)
raise AssertionError('No module was found')
def get_calling_module() -> str:
"""Returns the name of the module that's calling into this module."""
return get_calling_module_object_and_name().module_name
def create_xml_dom_element(
doc: minidom.Document, name: str, value: Any
) -> minidom.Element:
"""Returns an XML DOM element with name and text value.
Args:
doc: minidom.Document, the DOM document it should create nodes from.
name: str, the tag of XML element.
value: object, whose string representation will be used
as the value of the XML element. Illegal or highly discouraged xml 1.0
characters are stripped.
Returns:
An instance of minidom.Element.
"""
s = str(value)
if isinstance(value, bool):
# Display boolean values as the C++ flag library does: no caps.
s = s.lower()
# Remove illegal xml characters.
s = _ILLEGAL_XML_CHARS_REGEX.sub(u'', s)
e = doc.createElement(name)
e.appendChild(doc.createTextNode(s))
return e
def get_help_width() -> int:
"""Returns the integer width of help lines that is used in TextWrap."""
if not sys.stdout.isatty() or termios is None or fcntl is None:
return _DEFAULT_HELP_WIDTH
try:
data = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, b'1234')
columns = struct.unpack('hh', data)[1]
# Emacs mode returns 0.
# Here we assume that any value below 40 is unreasonable.
if columns >= _MIN_HELP_WIDTH:
return columns
# Returning an int as default is fine, int(int) just returns the int.
return int(os.getenv('COLUMNS', _DEFAULT_HELP_WIDTH))
except (TypeError, IOError, struct.error):
return _DEFAULT_HELP_WIDTH
def get_flag_suggestions(
attempt: Optional[str], longopt_list: Sequence[str]
) -> List[str]:
"""Returns helpful similar matches for an invalid flag."""
# Don't suggest on very short strings, or if no longopts are specified.
if len(attempt) <= 2 or not longopt_list:
return []
option_names = [v.split('=')[0] for v in longopt_list]
# Find close approximations in flag prefixes.
# This also handles the case where the flag is spelled right but ambiguous.
distances = [(_damerau_levenshtein(attempt, option[0:len(attempt)]), option)
for option in option_names]
# t[0] is distance, and sorting by t[1] allows us to have stable output.
distances.sort()
least_errors, _ = distances[0]
# Don't suggest excessively bad matches.
if least_errors >= _SUGGESTION_ERROR_RATE_THRESHOLD * len(attempt):
return []
suggestions = []
for errors, name in distances:
if errors == least_errors:
suggestions.append(name)
else:
break
return suggestions
def _damerau_levenshtein(a, b):
"""Returns Damerau-Levenshtein edit distance from a to b."""
memo = {}
def distance(x, y):
"""Recursively defined string distance with memoization."""
if (x, y) in memo:
return memo[x, y]
if not x:
d = len(y)
elif not y:
d = len(x)
else:
d = min(
distance(x[1:], y) + 1, # correct an insertion error
distance(x, y[1:]) + 1, # correct a deletion error
distance(x[1:], y[1:]) + (x[0] != y[0])) # correct a wrong character
if len(x) >= 2 and len(y) >= 2 and x[0] == y[1] and x[1] == y[0]:
# Correct a transposition.
t = distance(x[2:], y[2:]) + 1
if d > t:
d = t
memo[x, y] = d
return d
return distance(a, b)
def text_wrap(
text: str,
length: Optional[int] = None,
indent: str = '',
firstline_indent: Optional[str] = None,
) -> str:
"""Wraps a given text to a maximum line length and returns it.
It turns lines that only contain whitespace into empty lines, keeps new lines,
and expands tabs using 4 spaces.
Args:
text: str, text to wrap.
length: int, maximum length of a line, includes indentation.
If this is None then use get_help_width()
indent: str, indent for all but first line.
firstline_indent: str, indent for first line; if None, fall back to indent.
Returns:
str, the wrapped text.
Raises:
ValueError: Raised if indent or firstline_indent not shorter than length.
"""
# Get defaults where the caller passed None
if length is None:
length = get_help_width()
if indent is None:
indent = ''
if firstline_indent is None:
firstline_indent = indent
if len(indent) >= length:
raise ValueError('Length of indent exceeds length')
if len(firstline_indent) >= length:
raise ValueError('Length of first line indent exceeds length')
text = text.expandtabs(4)
result = []
# Create one wrapper for the first paragraph and one for subsequent
# paragraphs that does not have the initial wrapping.
wrapper = textwrap.TextWrapper(
width=length, initial_indent=firstline_indent, subsequent_indent=indent)
subsequent_wrapper = textwrap.TextWrapper(
width=length, initial_indent=indent, subsequent_indent=indent)
# textwrap does not have any special treatment for newlines. From the docs:
# "...newlines may appear in the middle of a line and cause strange output.
# For this reason, text should be split into paragraphs (using
# str.splitlines() or similar) which are wrapped separately."
for paragraph in (p.strip() for p in text.splitlines()):
if paragraph:
result.extend(wrapper.wrap(paragraph))
else:
result.append('') # Keep empty lines.
# Replace initial wrapper with wrapper for subsequent paragraphs.
wrapper = subsequent_wrapper
return '\n'.join(result)
def flag_dict_to_args(
flag_map: Dict[str, Any], multi_flags: Optional[Set[str]] = None
) -> Iterable[str]:
"""Convert a dict of values into process call parameters.
This method is used to convert a dictionary into a sequence of parameters
for a binary that parses arguments using this module.
Args:
flag_map: dict, a mapping where the keys are flag names (strings).
Values are treated according to their type:
* If value is ``None``, then only the name is emitted.
* If value is ``True``, then only the name is emitted.
* If value is ``False``, then only the name prepended with 'no' is
emitted.
* If value is a string then ``--name=value`` is emitted.
* If value is a collection, this will emit
``--name=value1,value2,value3``, unless the flag name is in
``multi_flags``, in which case this will emit
``--name=value1 --name=value2 --name=value3``.
* Everything else is converted to a string and passed as such.
multi_flags: set, names (strings) of flags that should be treated as
multi-flags.
Yields:
sequence of string suitable for a subprocess execution.
"""
for key, value in flag_map.items():
if value is None:
yield '--%s' % key
elif isinstance(value, bool):
if value:
yield '--%s' % key
else:
yield '--no%s' % key
elif isinstance(value, (bytes, type(u''))):
# We don't want strings to be handled like python collections.
yield '--%s=%s' % (key, value)
else:
# Now we attempt to deal with collections.
try:
if multi_flags and key in multi_flags:
for item in value:
yield '--%s=%s' % (key, str(item))
else:
yield '--%s=%s' % (key, ','.join(str(item) for item in value))
except TypeError:
# Default case.
yield '--%s=%s' % (key, value)
def trim_docstring(docstring: str) -> str:
"""Removes indentation from triple-quoted strings.
This is the function specified in PEP 257 to handle docstrings:
https://www.python.org/dev/peps/pep-0257/.
Args:
docstring: str, a python docstring.
Returns:
str, docstring with indentation removed.
"""
if not docstring:
return ''
# If you've got a line longer than this you have other problems...
max_indent = 1 << 29
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = max_indent
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < max_indent:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
def doc_to_help(doc: str) -> str:
"""Takes a __doc__ string and reformats it as help."""
# Get rid of starting and ending white space. Using lstrip() or even
# strip() could drop more than maximum of first line and right space
# of last line.
doc = doc.strip()
# Get rid of all empty lines.
whitespace_only_line = re.compile('^[ \t]+$', re.M)
doc = whitespace_only_line.sub('', doc)
# Cut out common space at line beginnings.
doc = trim_docstring(doc)
# Just like this module's comment, comments tend to be aligned somehow.
# In other words they all start with the same amount of white space.
# 1) keep double new lines;
# 2) keep ws after new lines if not empty line;
# 3) all other new lines shall be changed to a space;
# Solution: Match new lines between non white space and replace with space.
doc = re.sub(r'(?<=\S)\n(?=\S)', ' ', doc, flags=re.M)
return doc
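Two of the helpers above, text_wrap and flag_dict_to_args, are re-exported at the package level. A brief sketch of both follows; the strings and dictionary contents are illustrative.

from absl import flags

print(flags.text_wrap('a fairly long help string that needs wrapping',
                      length=24, indent='  '))

args = list(flags.flag_dict_to_args(
    {'name': 'absl', 'verbose': True, 'tags': ['a', 'b']}))
# -> ['--name=absl', '--verbose', '--tags=a,b']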

View File

@ -0,0 +1,352 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to enforce different constraints on flags.
Flags validators can be registered using following functions / decorators::
flags.register_validator
@flags.validator
flags.register_multi_flags_validator
@flags.multi_flags_validator
Three convenience functions are also provided for common flag constraints::
flags.mark_flag_as_required
flags.mark_flags_as_required
flags.mark_flags_as_mutual_exclusive
flags.mark_bool_flags_as_mutual_exclusive
See their docstring in this module for a usage manual.
Do NOT import this module directly. Import the flags package and use the
aliases defined at the package level instead.
"""
import warnings
from absl.flags import _exceptions
from absl.flags import _flagvalues
from absl.flags import _validators_classes
def register_validator(flag_name,
checker,
message='Flag validation failed',
flag_values=_flagvalues.FLAGS):
"""Adds a constraint, which will be enforced during program execution.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_name: str | FlagHolder, name or holder of the flag to be checked.
Positional-only parameter.
checker: callable, a function to validate the flag.
* input - A single positional argument: The value of the corresponding
flag (string, boolean, etc. This value will be passed to checker
by the library).
* output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either ``return False`` or
``raise flags.ValidationError(desired_error_message)``.
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Raises:
AttributeError: Raised when flag_name is not registered as a valid flag
name.
ValueError: Raised when flag_values is non-default and does not match the
FlagValues of the provided FlagHolder instance.
"""
flag_name, flag_values = _flagvalues.resolve_flag_ref(flag_name, flag_values)
v = _validators_classes.SingleFlagValidator(flag_name, checker, message)
_add_validator(flag_values, v)
def validator(flag_name, message='Flag validation failed',
flag_values=_flagvalues.FLAGS):
"""A function decorator for defining a flag validator.
Registers the decorated function as a validator for flag_name, e.g.::
@flags.validator('foo')
def _CheckFoo(foo):
...
See :func:`register_validator` for the specification of checker function.
Args:
flag_name: str | FlagHolder, name or holder of the flag to be checked.
Positional-only parameter.
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Returns:
A function decorator that registers its function argument as a validator.
Raises:
AttributeError: Raised when flag_name is not registered as a valid flag
name.
"""
def decorate(function):
register_validator(flag_name, function,
message=message,
flag_values=flag_values)
return function
return decorate
def register_multi_flags_validator(flag_names,
multi_flags_checker,
message='Flags validation failed',
flag_values=_flagvalues.FLAGS):
"""Adds a constraint to multiple flags.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_names: [str | FlagHolder], a list of the flag names or holders to be
checked. Positional-only parameter.
multi_flags_checker: callable, a function to validate the flag.
* input - dict, with keys() being flag_names, and value for each key
being the value of the corresponding flag (string, boolean, etc).
* output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise flags.ValidationError.
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Raises:
AttributeError: Raised when a flag is not registered as a valid flag name.
ValueError: Raised when multiple FlagValues are used in the same
invocation. This can occur when FlagHolders have different `_flagvalues`
or when str-type flag_names entries are present and the `flag_values`
argument does not match that of provided FlagHolder(s).
"""
flag_names, flag_values = _flagvalues.resolve_flag_refs(
flag_names, flag_values)
v = _validators_classes.MultiFlagsValidator(
flag_names, multi_flags_checker, message)
_add_validator(flag_values, v)
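# Editor's note: an illustrative sketch of register_multi_flags_validator;
# the --min_value/--max_value flag names are assumptions for this example.
def _example_register_multi_flags_validator():
  from absl import flags

  flags.DEFINE_integer('min_value', 0, 'Lower bound.')
  flags.DEFINE_integer('max_value', 10, 'Upper bound.')
  flags.register_multi_flags_validator(
      ['min_value', 'max_value'],
      lambda flags_dict: flags_dict['min_value'] <= flags_dict['max_value'],
      message='--min_value must not exceed --max_value.')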
def multi_flags_validator(flag_names,
message='Flag validation failed',
flag_values=_flagvalues.FLAGS):
"""A function decorator for defining a multi-flag validator.
Registers the decorated function as a validator for flag_names, e.g.::
@flags.multi_flags_validator(['foo', 'bar'])
def _CheckFooBar(flags_dict):
...
See :func:`register_multi_flags_validator` for the specification of checker
function.
Args:
flag_names: [str | FlagHolder], a list of the flag names or holders to be
checked. Positional-only parameter.
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Returns:
A function decorator that registers its function argument as a validator.
Raises:
AttributeError: Raised when a flag is not registered as a valid flag name.
"""
def decorate(function):
register_multi_flags_validator(flag_names,
function,
message=message,
flag_values=flag_values)
return function
return decorate
def mark_flag_as_required(flag_name, flag_values=_flagvalues.FLAGS):
"""Ensures that flag is not None during program execution.
Registers a flag validator, which will follow usual validator rules.
Important note: validator will pass for any non-``None`` value, such as
``False``, ``0`` (zero), ``''`` (empty string) and so on.
If your module might be imported by others, and you only wish to make the flag
required when the module is directly executed, call this method like this::
if __name__ == '__main__':
flags.mark_flag_as_required('your_flag_name')
app.run()
Args:
flag_name: str | FlagHolder, name or holder of the flag.
Positional-only parameter.
flag_values: flags.FlagValues, optional :class:`~absl.flags.FlagValues`
instance where the flag is defined.
Raises:
AttributeError: Raised when flag_name is not registered as a valid flag
name.
ValueError: Raised when flag_values is non-default and does not match the
FlagValues of the provided FlagHolder instance.
"""
flag_name, flag_values = _flagvalues.resolve_flag_ref(flag_name, flag_values)
if flag_values[flag_name].default is not None:
warnings.warn(
'Flag --%s has a non-None default value; therefore, '
'mark_flag_as_required will pass even if flag is not specified in the '
'command line!' % flag_name,
stacklevel=2)
register_validator(
flag_name,
lambda value: value is not None,
message='Flag --{} must have a value other than None.'.format(flag_name),
flag_values=flag_values)
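# Editor's note: a minimal sketch of the usual mark_flag_as_required pattern
# (illustrative flag name; not part of this library).
def _example_mark_flag_as_required():
  from absl import app, flags

  flags.DEFINE_string('input_path', None, 'Path to the input file.')

  def main(argv):
    del argv  # Unused.
    print(flags.FLAGS.input_path)

  flags.mark_flag_as_required('input_path')
  app.run(main)  # Exits with an error if --input_path is not provided.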
def mark_flags_as_required(flag_names, flag_values=_flagvalues.FLAGS):
"""Ensures that flags are not None during program execution.
If your module might be imported by others, and you only wish to make the flag
required when the module is directly executed, call this method like this::
if __name__ == '__main__':
flags.mark_flags_as_required(['flag1', 'flag2', 'flag3'])
app.run()
Args:
flag_names: Sequence[str | FlagHolder], names or holders of the flags.
flag_values: flags.FlagValues, optional FlagValues instance where the flags
are defined.
Raises:
AttributeError: If any of the flag names has not already been defined as a flag.
"""
for flag_name in flag_names:
mark_flag_as_required(flag_name, flag_values)
def mark_flags_as_mutual_exclusive(flag_names, required=False,
flag_values=_flagvalues.FLAGS):
"""Ensures that only one flag among flag_names is not None.
Important note: This validator checks if flag values are ``None``, and it does
not distinguish between default and explicit values. Therefore, this validator
does not make sense when applied to flags with default values other than None,
including other false values (e.g. ``False``, ``0``, ``''``, ``[]``). That
includes multi flags with a default value of ``[]`` instead of None.
Args:
flag_names: [str | FlagHolder], names or holders of flags.
Positional-only parameter.
required: bool. If true, exactly one of the flags must have a value other
than None. Otherwise, at most one of the flags can have a value other
than None, and it is valid for all of the flags to be None.
flag_values: flags.FlagValues, optional FlagValues instance where the flags
are defined.
Raises:
ValueError: Raised when multiple FlagValues are used in the same
invocation. This can occur when FlagHolders have different `_flagvalues`
or when str-type flag_names entries are present and the `flag_values`
argument does not match that of provided FlagHolder(s).
"""
flag_names, flag_values = _flagvalues.resolve_flag_refs(
flag_names, flag_values)
for flag_name in flag_names:
if flag_values[flag_name].default is not None:
warnings.warn(
'Flag --{} has a non-None default value. That does not make sense '
'with mark_flags_as_mutual_exclusive, which checks whether the '
'listed flags have a value other than None.'.format(flag_name),
stacklevel=2)
def validate_mutual_exclusion(flags_dict):
flag_count = sum(1 for val in flags_dict.values() if val is not None)
if flag_count == 1 or (not required and flag_count == 0):
return True
raise _exceptions.ValidationError(
'{} one of ({}) must have a value other than None.'.format(
'Exactly' if required else 'At most', ', '.join(flag_names)))
register_multi_flags_validator(
flag_names, validate_mutual_exclusion, flag_values=flag_values)
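# Editor's note: an illustrative sketch; --json_output/--text_output are
# assumed flag names, both defaulting to None as the docstring requires.
def _example_mark_flags_as_mutual_exclusive():
  from absl import flags

  flags.DEFINE_string('json_output', None, 'Write JSON to this path.')
  flags.DEFINE_string('text_output', None, 'Write plain text to this path.')
  # At most one of the two may be set; pass required=True to demand exactly one.
  flags.mark_flags_as_mutual_exclusive(['json_output', 'text_output'])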
def mark_bool_flags_as_mutual_exclusive(flag_names, required=False,
flag_values=_flagvalues.FLAGS):
"""Ensures that only one flag among flag_names is True.
Args:
flag_names: [str | FlagHolder], names or holders of flags.
Positional-only parameter.
required: bool. If true, exactly one flag must be True. Otherwise, at most
one flag can be True, and it is valid for all flags to be False.
flag_values: flags.FlagValues, optional FlagValues instance where the flags
are defined.
Raises:
ValueError: Raised when multiple FlagValues are used in the same
invocation. This can occur when FlagHolders have different `_flagvalues`
or when str-type flag_names entries are present and the `flag_values`
argument does not match that of provided FlagHolder(s).
"""
flag_names, flag_values = _flagvalues.resolve_flag_refs(
flag_names, flag_values)
for flag_name in flag_names:
if not flag_values[flag_name].boolean:
raise _exceptions.ValidationError(
'Flag --{} is not Boolean, which is required for flags used in '
'mark_bool_flags_as_mutual_exclusive.'.format(flag_name))
def validate_boolean_mutual_exclusion(flags_dict):
flag_count = sum(bool(val) for val in flags_dict.values())
if flag_count == 1 or (not required and flag_count == 0):
return True
raise _exceptions.ValidationError(
'{} one of ({}) must be True.'.format(
'Exactly' if required else 'At most', ', '.join(flag_names)))
register_multi_flags_validator(
flag_names, validate_boolean_mutual_exclusion, flag_values=flag_values)
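# Editor's note: an illustrative sketch with assumed boolean flag names.
def _example_mark_bool_flags_as_mutual_exclusive():
  from absl import flags

  flags.DEFINE_boolean('enable_cache', False, 'Use the cache.')
  flags.DEFINE_boolean('force_refresh', False, 'Bypass the cache.')
  # At most one of the two may be True on the command line.
  flags.mark_bool_flags_as_mutual_exclusive(['enable_cache', 'force_refresh'])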
def _add_validator(fv, validator_instance):
"""Register new flags validator to be checked.
Args:
fv: flags.FlagValues, the FlagValues instance to add the validator.
validator_instance: validators.Validator, the validator to add.
Raises:
KeyError: Raised when validators work with a non-existing flag.
"""
for flag_name in validator_instance.get_flags_names():
fv[flag_name].validators.append(validator_instance)

View File

@ -0,0 +1,172 @@
# Copyright 2021 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines *private* classes used for flag validators.
Do NOT import this module. DO NOT use anything from this module. They are
private APIs.
"""
from absl.flags import _exceptions
class Validator(object):
"""Base class for flags validators.
Users should NOT overload these classes, and use flags.Register...
methods instead.
"""
# Used to assign each validator a unique insertion_index
validators_count = 0
def __init__(self, checker, message):
"""Constructor to create all validators.
Args:
checker: function to verify the constraint.
Input of this method varies, see SingleFlagValidator and
multi_flags_validator for a detailed description.
message: str, error message to be shown to the user.
"""
self.checker = checker
self.message = message
Validator.validators_count += 1
# Used to run validators in the order they were registered.
self.insertion_index = Validator.validators_count
def verify(self, flag_values):
"""Verifies that constraint is satisfied.
flags library calls this method to verify Validator's constraint.
Args:
flag_values: flags.FlagValues, the FlagValues instance to get flags from.
Raises:
Error: Raised if constraint is not satisfied.
"""
param = self._get_input_to_checker_function(flag_values)
if not self.checker(param):
raise _exceptions.ValidationError(self.message)
def get_flags_names(self):
"""Returns the names of the flags checked by this validator.
Returns:
[string], names of the flags.
"""
raise NotImplementedError('This method should be overloaded')
def print_flags_with_values(self, flag_values):
raise NotImplementedError('This method should be overloaded')
def _get_input_to_checker_function(self, flag_values):
"""Given flag values, returns the input to be given to checker.
Args:
flag_values: flags.FlagValues, containing all flags.
Returns:
The input to be given to checker. The return type depends on the specific
validator.
"""
raise NotImplementedError('This method should be overloaded')
class SingleFlagValidator(Validator):
"""Validator behind register_validator() method.
Validates that a single flag passes its checker function. The checker function
takes the flag value and returns True (if value looks fine) or, if flag value
is not valid, either returns False or raises an Exception.
"""
def __init__(self, flag_name, checker, message):
"""Constructor.
Args:
flag_name: string, name of the flag.
checker: function to verify the validator.
input - value of the corresponding flag (string, boolean, etc).
output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise flags.ValidationError(desired_error_message).
message: str, error message to be shown to the user if validator's
condition is not satisfied.
"""
super(SingleFlagValidator, self).__init__(checker, message)
self.flag_name = flag_name
def get_flags_names(self):
return [self.flag_name]
def print_flags_with_values(self, flag_values):
return 'flag --%s=%s' % (self.flag_name, flag_values[self.flag_name].value)
def _get_input_to_checker_function(self, flag_values):
"""Given flag values, returns the input to be given to checker.
Args:
flag_values: flags.FlagValues, the FlagValues instance to get flags from.
Returns:
object, the input to be given to checker.
"""
return flag_values[self.flag_name].value
class MultiFlagsValidator(Validator):
"""Validator behind register_multi_flags_validator method.
Validates that flag values pass their common checker function. The checker
function takes flag values and returns True (if values look fine) or,
if values are not valid, either returns False or raises an Exception.
"""
def __init__(self, flag_names, checker, message):
"""Constructor.
Args:
flag_names: [str], containing names of the flags used by checker.
checker: function to verify the validator.
input - dict, with keys() being flag_names, and value for each
key being the value of the corresponding flag (string, boolean,
etc).
output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise flags.ValidationError(desired_error_message).
message: str, error message to be shown to the user if validator's
condition is not satisfied.
"""
super(MultiFlagsValidator, self).__init__(checker, message)
self.flag_names = flag_names
def _get_input_to_checker_function(self, flag_values):
"""Given flag values, returns the input to be given to checker.
Args:
flag_values: flags.FlagValues, the FlagValues instance to get flags from.
Returns:
dict, with keys() being self.flag_names, and value for each key
being the value of the corresponding flag (string, boolean, etc).
"""
return dict([key, flag_values[key].value] for key in self.flag_names)
def print_flags_with_values(self, flag_values):
prefix = 'flags '
flags_with_values = []
for key in self.flag_names:
flags_with_values.append('%s=%s' % (key, flag_values[key].value))
return prefix + ', '.join(flags_with_values)
def get_flags_names(self):
return self.flag_names

View File

@ -0,0 +1,388 @@
# Copyright 2018 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides argparse integration with absl.flags.
``argparse_flags.ArgumentParser`` is a drop-in replacement for
:class:`argparse.ArgumentParser`. It takes care of collecting and defining absl
flags in :mod:`argparse`.
Here is a simple example::
# Assume the following absl.flags is defined in another module:
#
# from absl import flags
# flags.DEFINE_string('echo', None, 'The echo message.')
#
parser = argparse_flags.ArgumentParser(
description='A demo of absl.flags and argparse integration.')
parser.add_argument('--header', help='Header message to print.')
# The parser will also accept the absl flag `--echo`.
# The `header` value is available as `args.header` just like a regular
# argparse flag. The absl flag `--echo` continues to be available via
# `absl.flags.FLAGS` if you want to access it.
args = parser.parse_args()
# Example usages:
# ./program --echo='A message.' --header='A header'
# ./program --header 'A header' --echo 'A message.'
Here is another example that demonstrates subparsers::
parser = argparse_flags.ArgumentParser(description='A subcommands demo.')
parser.add_argument('--header', help='The header message to print.')
subparsers = parser.add_subparsers(help='The command to execute.')
roll_dice_parser = subparsers.add_parser(
'roll_dice', help='Roll a dice.',
# By default, absl flags can also be specified after the sub-command.
# To only allow them before sub-command, pass
# `inherited_absl_flags=None`.
inherited_absl_flags=None)
roll_dice_parser.add_argument('--num_faces', type=int, default=6)
roll_dice_parser.set_defaults(command=roll_dice)
shuffle_parser = subparsers.add_parser('shuffle', help='Shuffle inputs.')
shuffle_parser.add_argument(
'inputs', metavar='I', nargs='+', help='Inputs to shuffle.')
shuffle_parser.set_defaults(command=shuffle)
args = parser.parse_args(argv[1:])
args.command(args)
# Example usages:
# ./program --echo='A message.' roll_dice --num_faces=6
# ./program shuffle --echo='A message.' 1 2 3 4
There are several differences between :mod:`absl.flags` and
:mod:`~absl.flags.argparse_flags`:
1. Flags defined with absl.flags are parsed differently when using the
argparse parser. Notably:
1) absl.flags allows both single-dash and double-dash for any flag, and
doesn't distinguish them; argparse_flags only allows double-dash for
flag's regular name, and single-dash for flag's ``short_name``.
2) Boolean flags in absl.flags can be specified with ``--bool``,
``--nobool``, as well as ``--bool=true/false`` (though not recommended);
in argparse_flags, it only allows ``--bool``, ``--nobool``.
2. Help related flag differences:
1) absl.flags does not define help flags, absl.app does that; argparse_flags
defines help flags unless passed with ``add_help=False``.
2) absl.app supports ``--helpxml``; argparse_flags does not.
3) argparse_flags supports ``-h``; absl.app does not.
"""
import argparse
import sys
from absl import flags
_BUILT_IN_FLAGS = frozenset({
'help',
'helpshort',
'helpfull',
'helpxml',
'flagfile',
'undefok',
})
class ArgumentParser(argparse.ArgumentParser):
"""Custom ArgumentParser class to support special absl flags."""
def __init__(self, **kwargs):
"""Initializes ArgumentParser.
Args:
**kwargs: same as argparse.ArgumentParser, except:
1. It also accepts `inherited_absl_flags`: the absl flags to inherit.
The default is the global absl.flags.FLAGS instance. Pass None to
ignore absl flags.
2. The `prefix_chars` argument must be the default value '-'.
Raises:
ValueError: Raised when prefix_chars is not '-'.
"""
prefix_chars = kwargs.get('prefix_chars', '-')
if prefix_chars != '-':
raise ValueError(
'argparse_flags.ArgumentParser only supports "-" as the prefix '
'character, found "{}".'.format(prefix_chars))
# Remove inherited_absl_flags before calling super.
self._inherited_absl_flags = kwargs.pop('inherited_absl_flags', flags.FLAGS)
# Now call super to initialize argparse.ArgumentParser before calling
# add_argument in _define_absl_flags.
super(ArgumentParser, self).__init__(**kwargs)
if self.add_help:
# -h and --help are defined in super.
# Also add the --helpshort and --helpfull flags.
self.add_argument(
# Action 'help' defines a similar flag to -h/--help.
'--helpshort', action='help',
default=argparse.SUPPRESS, help=argparse.SUPPRESS)
self.add_argument(
'--helpfull', action=_HelpFullAction,
default=argparse.SUPPRESS, help='show full help message and exit')
if self._inherited_absl_flags is not None:
self.add_argument(
'--undefok', default=argparse.SUPPRESS, help=argparse.SUPPRESS)
self._define_absl_flags(self._inherited_absl_flags)
def parse_known_args(self, args=None, namespace=None):
if args is None:
args = sys.argv[1:]
if self._inherited_absl_flags is not None:
# Handle --flagfile.
# Explicitly specify force_gnu=True, since argparse behaves like
# gnu_getopt: flags can be specified after positional arguments.
args = self._inherited_absl_flags.read_flags_from_files(
args, force_gnu=True)
undefok_missing = object()
undefok = getattr(namespace, 'undefok', undefok_missing)
namespace, args = super(ArgumentParser, self).parse_known_args(
args, namespace)
# For Python <= 2.7.8: https://bugs.python.org/issue9351, a bug where
# sub-parsers don't preserve existing namespace attributes.
# Restore the undefok attribute if a sub-parser dropped it.
if undefok is not undefok_missing:
namespace.undefok = undefok
if self._inherited_absl_flags is not None:
# Handle --undefok. At this point, `args` only contains unknown flags,
# so it won't strip defined flags that are also specified with --undefok.
# For Python <= 2.7.8: https://bugs.python.org/issue9351, a bug where
# sub-parsers don't preserve existing namespace attributes. The undefok
# attribute might not exist because a subparser dropped it.
if hasattr(namespace, 'undefok'):
args = _strip_undefok_args(namespace.undefok, args)
# absl flags are not exposed in the Namespace object. See Namespace:
# https://docs.python.org/3/library/argparse.html#argparse.Namespace.
del namespace.undefok
self._inherited_absl_flags.mark_as_parsed()
try:
self._inherited_absl_flags.validate_all_flags()
except flags.IllegalFlagValueError as e:
self.error(str(e))
return namespace, args
def _define_absl_flags(self, absl_flags):
"""Defines flags from absl_flags."""
key_flags = set(absl_flags.get_key_flags_for_module(sys.argv[0]))
for name in absl_flags:
if name in _BUILT_IN_FLAGS:
# Do not inherit built-in flags.
continue
flag_instance = absl_flags[name]
# Each flag with a short_name appears in FLAGS twice, so only define
# when the dictionary key is equal to the regular name.
if name == flag_instance.name:
# Suppress the flag in the help short message if it's not a main
# module's key flag.
suppress = flag_instance not in key_flags
self._define_absl_flag(flag_instance, suppress)
def _define_absl_flag(self, flag_instance, suppress):
"""Defines a flag from the flag_instance."""
flag_name = flag_instance.name
short_name = flag_instance.short_name
argument_names = ['--' + flag_name]
if short_name:
argument_names.insert(0, '-' + short_name)
if suppress:
helptext = argparse.SUPPRESS
else:
# argparse help string uses %-formatting. Escape the literal %'s.
helptext = flag_instance.help.replace('%', '%%')
if flag_instance.boolean:
# Only add the `no` form to the long name.
argument_names.append('--no' + flag_name)
self.add_argument(
*argument_names, action=_BooleanFlagAction, help=helptext,
metavar=flag_instance.name.upper(),
flag_instance=flag_instance)
else:
self.add_argument(
*argument_names, action=_FlagAction, help=helptext,
metavar=flag_instance.name.upper(),
flag_instance=flag_instance)
class _FlagAction(argparse.Action):
"""Action class for Abseil non-boolean flags."""
def __init__(
self,
option_strings,
dest,
help, # pylint: disable=redefined-builtin
metavar,
flag_instance,
default=argparse.SUPPRESS):
"""Initializes _FlagAction.
Args:
option_strings: See argparse.Action.
dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS.
help: See argparse.Action.
metavar: See argparse.Action.
flag_instance: absl.flags.Flag, the absl flag instance.
default: Ignored. The flag always uses dest=argparse.SUPPRESS so it
doesn't affect the parsing result.
"""
del dest
self._flag_instance = flag_instance
super(_FlagAction, self).__init__(
option_strings=option_strings,
dest=argparse.SUPPRESS,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
"""See https://docs.python.org/3/library/argparse.html#action-classes."""
self._flag_instance.parse(values)
self._flag_instance.using_default_value = False
class _BooleanFlagAction(argparse.Action):
"""Action class for Abseil boolean flags."""
def __init__(
self,
option_strings,
dest,
help, # pylint: disable=redefined-builtin
metavar,
flag_instance,
default=argparse.SUPPRESS):
"""Initializes _BooleanFlagAction.
Args:
option_strings: See argparse.Action.
dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS.
help: See argparse.Action.
metavar: See argparse.Action.
flag_instance: absl.flags.Flag, the absl flag instance.
default: Ignored. The flag always uses dest=argparse.SUPPRESS so it
doesn't affect the parsing result.
"""
del dest, default
self._flag_instance = flag_instance
flag_names = [self._flag_instance.name]
if self._flag_instance.short_name:
flag_names.append(self._flag_instance.short_name)
self._flag_names = frozenset(flag_names)
super(_BooleanFlagAction, self).__init__(
option_strings=option_strings,
dest=argparse.SUPPRESS,
nargs=0, # Does not accept values, only `--bool` or `--nobool`.
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
"""See https://docs.python.org/3/library/argparse.html#action-classes."""
if not isinstance(values, list) or values:
raise ValueError('values must be an empty list.')
if option_string.startswith('--'):
option = option_string[2:]
else:
option = option_string[1:]
if option in self._flag_names:
self._flag_instance.parse('true')
else:
if not option.startswith('no') or option[2:] not in self._flag_names:
raise ValueError('invalid option_string: ' + option_string)
self._flag_instance.parse('false')
self._flag_instance.using_default_value = False
class _HelpFullAction(argparse.Action):
"""Action class for --helpfull flag."""
def __init__(self, option_strings, dest, default, help): # pylint: disable=redefined-builtin
"""Initializes _HelpFullAction.
Args:
option_strings: See argparse.Action.
dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS.
default: Ignored.
help: See argparse.Action.
"""
del dest, default
super(_HelpFullAction, self).__init__(
option_strings=option_strings,
dest=argparse.SUPPRESS,
default=argparse.SUPPRESS,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
"""See https://docs.python.org/3/library/argparse.html#action-classes."""
# This only prints flags when help is not argparse.SUPPRESS.
# It includes user defined argparse flags, as well as main module's
# key absl flags. Other absl flags use argparse.SUPPRESS, so they aren't
# printed here.
parser.print_help()
absl_flags = parser._inherited_absl_flags # pylint: disable=protected-access
if absl_flags is not None:
modules = sorted(absl_flags.flags_by_module_dict())
main_module = sys.argv[0]
if main_module in modules:
# The main module flags are already printed in parser.print_help().
modules.remove(main_module)
print(absl_flags._get_help_for_modules( # pylint: disable=protected-access
modules, prefix='', include_special_flags=True))
parser.exit()
def _strip_undefok_args(undefok, args):
"""Returns a new list of args after removing flags in --undefok."""
if undefok:
undefok_names = set(name.strip() for name in undefok.split(','))
undefok_names |= set('no' + name for name in undefok_names)
# Remove undefok flags.
args = [arg for arg in args if not _is_undefok(arg, undefok_names)]
return args
def _is_undefok(arg, undefok_names):
"""Returns whether we can ignore arg based on a set of undefok flag names."""
if not arg.startswith('-'):
return False
if arg.startswith('--'):
arg_without_dash = arg[2:]
else:
arg_without_dash = arg[1:]
if '=' in arg_without_dash:
name, _ = arg_without_dash.split('=', 1)
else:
name = arg_without_dash
if name in undefok_names:
return True
return False
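# Editor's note: an illustrative trace of the two helpers above; the flag
# names and values are made up for this sketch.
def _example_strip_undefok_args():
  # undefok_names becomes {'foo', 'bar', 'nofoo', 'nobar'}, so --foo=1 and
  # --nobar are dropped while --baz=2 is kept.
  assert _strip_undefok_args('foo,bar', ['--foo=1', '--baz=2', '--nobar']) == [
      '--baz=2'
  ]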

File diff suppressed because it is too large

View File

@ -0,0 +1,290 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Any, Callable, Dict, NoReturn, Optional, Tuple, TypeVar, Union
from absl import flags
# Logging levels.
FATAL: int
ERROR: int
WARNING: int
WARN: int # Deprecated name.
INFO: int
DEBUG: int
ABSL_LOGGING_PREFIX_REGEX: str
LOGTOSTDERR: flags.FlagHolder[bool]
ALSOLOGTOSTDERR: flags.FlagHolder[bool]
LOG_DIR: flags.FlagHolder[str]
VERBOSITY: flags.FlagHolder[int]
LOGGER_LEVELS: flags.FlagHolder[Dict[str, str]]
STDERRTHRESHOLD: flags.FlagHolder[str]
SHOWPREFIXFORINFO: flags.FlagHolder[bool]
def get_verbosity() -> int:
...
def set_verbosity(v: Union[int, str]) -> None:
...
def set_stderrthreshold(s: Union[int, str]) -> None:
...
# TODO(b/277607978): Provide actual args+kwargs shadowing stdlib's logging functions.
def fatal(msg: Any, *args: Any, **kwargs: Any) -> NoReturn:
...
def error(msg: Any, *args: Any, **kwargs: Any) -> None:
...
def warning(msg: Any, *args: Any, **kwargs: Any) -> None:
...
def warn(msg: Any, *args: Any, **kwargs: Any) -> None:
...
def info(msg: Any, *args: Any, **kwargs: Any) -> None:
...
def debug(msg: Any, *args: Any, **kwargs: Any) -> None:
...
def exception(msg: Any, *args: Any, **kwargs: Any) -> None:
...
def log_every_n(level: int, msg: Any, n: int, *args: Any) -> None:
...
def log_every_n_seconds(
level: int, msg: Any, n_seconds: float, *args: Any
) -> None:
...
def log_first_n(level: int, msg: Any, n: int, *args: Any) -> None:
...
def log_if(level: int, msg: Any, condition: Any, *args: Any) -> None:
...
def log(level: int, msg: Any, *args: Any, **kwargs: Any) -> None:
...
def vlog(level: int, msg: Any, *args: Any, **kwargs: Any) -> None:
...
def vlog_is_on(level: int) -> bool:
...
def flush() -> None:
...
def level_debug() -> bool:
...
def level_info() -> bool:
...
def level_warning() -> bool:
...
level_warn = level_warning # Deprecated function.
def level_error() -> bool:
...
def get_log_file_name(level: int = ...) -> str:
...
def find_log_dir_and_names(
program_name: Optional[str] = ..., log_dir: Optional[str] = ...
) -> Tuple[str, str, str]:
...
def find_log_dir(log_dir: Optional[str] = ...) -> str:
...
def get_absl_log_prefix(record: logging.LogRecord) -> str:
...
_SkipLogT = TypeVar('_SkipLogT', str, Callable[..., Any])
def skip_log_prefix(func: _SkipLogT) -> _SkipLogT:
...
_StreamT = TypeVar("_StreamT")
class PythonHandler(logging.StreamHandler[_StreamT]):
def __init__(
self,
stream: Optional[_StreamT] = ...,
formatter: Optional[logging.Formatter] = ...,
) -> None:
...
def start_logging_to_file(
self, program_name: Optional[str] = ..., log_dir: Optional[str] = ...
) -> None:
...
def use_absl_log_file(
self, program_name: Optional[str] = ..., log_dir: Optional[str] = ...
) -> None:
...
def flush(self) -> None:
...
def emit(self, record: logging.LogRecord) -> None:
...
def close(self) -> None:
...
class ABSLHandler(logging.Handler):
def __init__(self, python_logging_formatter: PythonFormatter) -> None:
...
def format(self, record: logging.LogRecord) -> str:
...
def setFormatter(self, fmt) -> None:
...
def emit(self, record: logging.LogRecord) -> None:
...
def flush(self) -> None:
...
def close(self) -> None:
...
def handle(self, record: logging.LogRecord) -> bool:
...
@property
def python_handler(self) -> PythonHandler:
...
def activate_python_handler(self) -> None:
...
def use_absl_log_file(
self, program_name: Optional[str] = ..., log_dir: Optional[str] = ...
) -> None:
...
def start_logging_to_file(self, program_name=None, log_dir=None) -> None:
...
class PythonFormatter(logging.Formatter):
def format(self, record: logging.LogRecord) -> str:
...
class ABSLLogger(logging.Logger):
def findCaller(
self, stack_info: bool = ..., stacklevel: int = ...
) -> Tuple[str, int, str, Optional[str]]:
...
def critical(self, msg: Any, *args: Any, **kwargs: Any) -> None:
...
def fatal(self, msg: Any, *args: Any, **kwargs: Any) -> NoReturn:
...
def error(self, msg: Any, *args: Any, **kwargs: Any) -> None:
...
def warn(self, msg: Any, *args: Any, **kwargs: Any) -> None:
...
def warning(self, msg: Any, *args: Any, **kwargs: Any) -> None:
...
def info(self, msg: Any, *args: Any, **kwargs: Any) -> None:
...
def debug(self, msg: Any, *args: Any, **kwargs: Any) -> None:
...
def log(self, level: int, msg: Any, *args: Any, **kwargs: Any) -> None:
...
def handle(self, record: logging.LogRecord) -> None:
...
@classmethod
def register_frame_to_skip(
cls, file_name: str, function_name: str, line_number: Optional[int] = ...
) -> None:
...
# NOTE: Returns None before _initialize is called, but that shouldn't occur after import.
def get_absl_logger() -> ABSLLogger:
...
# NOTE: Returns None before _initialize is called, but that shouldn't occur after import.
def get_absl_handler() -> ABSLHandler:
...
def use_python_logging(quiet: bool = ...) -> None:
...
def use_absl_handler() -> None:
...
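# Editor's note: a minimal, hedged usage sketch of the public helpers stubbed
# above; it is not part of this stub file.
def _example_absl_logging_usage() -> None:
  from absl import logging

  logging.set_verbosity(logging.DEBUG)
  logging.info('Processed %d records', 42)
  logging.log_every_n(logging.WARNING, 'Slow response', 100)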

View File

@ -0,0 +1,214 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to convert log levels between Abseil Python, C++, and Python standard.
This converter has to convert (best effort) between three different
logging level schemes:
* **cpp**: The C++ logging level scheme used in Abseil C++.
* **absl**: The absl.logging level scheme used in Abseil Python.
* **standard**: The python standard library logging level scheme.
Here is a handy ascii chart for easy mental mapping::
LEVEL | cpp | absl | standard |
---------+-----+--------+----------+
DEBUG | 0 | 1 | 10 |
INFO | 0 | 0 | 20 |
WARNING | 1 | -1 | 30 |
ERROR | 2 | -2 | 40 |
CRITICAL | 3 | -3 | 50 |
FATAL | 3 | -3 | 50 |
Note: standard logging ``CRITICAL`` is mapped to absl/cpp ``FATAL``.
However, only ``CRITICAL`` logs from the absl logger (or absl.logging.fatal)
will terminate the program. ``CRITICAL`` logs from non-absl loggers are treated
as error logs with a message prefix ``"CRITICAL - "``.
Converting from standard to absl or cpp is a lossy conversion.
Converting back to standard will lose granularity. For this reason,
users should always try to convert to standard, the richest
representation, before manipulating the levels, and then only to cpp
or absl if those level schemes are absolutely necessary.
"""
import logging
STANDARD_CRITICAL = logging.CRITICAL
STANDARD_ERROR = logging.ERROR
STANDARD_WARNING = logging.WARNING
STANDARD_INFO = logging.INFO
STANDARD_DEBUG = logging.DEBUG
# These levels are also used to define the constants
# FATAL, ERROR, WARNING, INFO, and DEBUG in the
# absl.logging module.
ABSL_FATAL = -3
ABSL_ERROR = -2
ABSL_WARNING = -1
ABSL_WARN = -1 # Deprecated name.
ABSL_INFO = 0
ABSL_DEBUG = 1
ABSL_LEVELS = {ABSL_FATAL: 'FATAL',
ABSL_ERROR: 'ERROR',
ABSL_WARNING: 'WARNING',
ABSL_INFO: 'INFO',
ABSL_DEBUG: 'DEBUG'}
# Inverts the ABSL_LEVELS dictionary
ABSL_NAMES = {'FATAL': ABSL_FATAL,
'ERROR': ABSL_ERROR,
'WARNING': ABSL_WARNING,
'WARN': ABSL_WARNING, # Deprecated name.
'INFO': ABSL_INFO,
'DEBUG': ABSL_DEBUG}
ABSL_TO_STANDARD = {ABSL_FATAL: STANDARD_CRITICAL,
ABSL_ERROR: STANDARD_ERROR,
ABSL_WARNING: STANDARD_WARNING,
ABSL_INFO: STANDARD_INFO,
ABSL_DEBUG: STANDARD_DEBUG}
# Inverts the ABSL_TO_STANDARD
STANDARD_TO_ABSL = dict((v, k) for (k, v) in ABSL_TO_STANDARD.items())
def get_initial_for_level(level):
"""Gets the initial that should start the log line for the given level.
It returns:
* ``'I'`` when: ``level < STANDARD_WARNING``.
* ``'W'`` when: ``STANDARD_WARNING <= level < STANDARD_ERROR``.
* ``'E'`` when: ``STANDARD_ERROR <= level < STANDARD_CRITICAL``.
* ``'F'`` when: ``level >= STANDARD_CRITICAL``.
Args:
level: int, a Python standard logging level.
Returns:
The first initial as it would be logged by the C++ logging module.
"""
if level < STANDARD_WARNING:
return 'I'
elif level < STANDARD_ERROR:
return 'W'
elif level < STANDARD_CRITICAL:
return 'E'
else:
return 'F'
def absl_to_cpp(level):
"""Converts an absl log level to a cpp log level.
Args:
level: int, an absl.logging level.
Raises:
TypeError: Raised when level is not an integer.
Returns:
The corresponding integer level for use in Abseil C++.
"""
if not isinstance(level, int):
raise TypeError('Expect an int level, found {}'.format(type(level)))
if level >= 0:
# C++ log levels must be >= 0
return 0
else:
return -level
def absl_to_standard(level):
"""Converts an integer level from the absl value to the standard value.
Args:
level: int, an absl.logging level.
Raises:
TypeError: Raised when level is not an integer.
Returns:
The corresponding integer level for use in standard logging.
"""
if not isinstance(level, int):
raise TypeError('Expect an int level, found {}'.format(type(level)))
if level < ABSL_FATAL:
level = ABSL_FATAL
if level <= ABSL_DEBUG:
return ABSL_TO_STANDARD[level]
# Maps to vlog levels.
return STANDARD_DEBUG - level + 1
def string_to_standard(level):
"""Converts a string level to standard logging level value.
Args:
level: str, case-insensitive ``'debug'``, ``'info'``, ``'warning'``,
``'error'``, ``'fatal'``.
Returns:
The corresponding integer level for use in standard logging.
"""
return absl_to_standard(ABSL_NAMES.get(level.upper()))
def standard_to_absl(level):
"""Converts an integer level from the standard value to the absl value.
Args:
level: int, a Python standard logging level.
Raises:
TypeError: Raised when level is not an integer.
Returns:
The corresponding integer level for use in absl logging.
"""
if not isinstance(level, int):
raise TypeError('Expect an int level, found {}'.format(type(level)))
if level < 0:
level = 0
if level < STANDARD_DEBUG:
# Maps to vlog levels.
return STANDARD_DEBUG - level + 1
elif level < STANDARD_INFO:
return ABSL_DEBUG
elif level < STANDARD_WARNING:
return ABSL_INFO
elif level < STANDARD_ERROR:
return ABSL_WARNING
elif level < STANDARD_CRITICAL:
return ABSL_ERROR
else:
return ABSL_FATAL
def standard_to_cpp(level):
"""Converts an integer level from the standard value to the cpp value.
Args:
level: int, a Python standard logging level.
Raises:
TypeError: Raised when level is not an integer.
Returns:
The corresponding integer level for use in cpp logging.
"""
return absl_to_cpp(standard_to_absl(level))
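# Editor's note: a few worked conversions illustrating the chart above
# (editor's addition, not part of the module).
def _example_conversions():
  assert absl_to_standard(ABSL_WARNING) == STANDARD_WARNING
  assert string_to_standard('error') == STANDARD_ERROR
  assert standard_to_absl(logging.DEBUG) == ABSL_DEBUG
  assert standard_to_cpp(logging.ERROR) == 2
  # Standard levels below DEBUG map to absl vlog levels: DEBUG - level + 1.
  assert standard_to_absl(5) == 6
  assert get_initial_for_level(logging.INFO) == 'I'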

View File

@ -0,0 +1,13 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,68 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal helper for running tests on Windows Bazel."""
import os
from absl import flags
FLAGS = flags.FLAGS
def get_executable_path(py_binary_name):
"""Returns the executable path of a py_binary.
This returns the executable path of a py_binary that is in another Bazel
target's data dependencies.
On Linux/macOS, the path and __file__ have the same root directory.
On Windows, bazel builds an .exe file and we need to use the MANIFEST file
to locate the actual binary.
Args:
py_binary_name: string, the name of a py_binary that is in another Bazel
target's data dependencies.
Raises:
RuntimeError: Raised when it cannot locate the executable path.
"""
if os.name == 'nt':
py_binary_name += '.exe'
manifest_file = os.path.join(FLAGS.test_srcdir, 'MANIFEST')
workspace_name = os.environ['TEST_WORKSPACE']
manifest_entry = '{}/{}'.format(workspace_name, py_binary_name)
with open(manifest_file, 'r') as manifest_fd:
for line in manifest_fd:
tokens = line.strip().split(' ')
if len(tokens) != 2:
continue
if manifest_entry == tokens[0]:
return tokens[1]
raise RuntimeError(
'Cannot locate executable path for {}, MANIFEST file: {}.'.format(
py_binary_name, manifest_file))
else:
# NOTE: __file__ may be .py or .pyc, depending on how the module was
# loaded and executed.
path = __file__
# Use the package name to find the root directory: every dot is
# a directory, plus one for ourselves.
for _ in range(__name__.count('.') + 1):
path = os.path.dirname(path)
root_directory = path
return os.path.join(root_directory, py_binary_name)

View File

@ -0,0 +1,91 @@
# Copyright 2018 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TestResult implementing default output for test execution status."""
import unittest
class TextTestResult(unittest.TextTestResult):
"""TestResult class that provides the default text result formatting."""
def __init__(self, stream, descriptions, verbosity):
# Disable the verbose per-test output from the superclass, since it would
# conflict with our customized output.
super(TextTestResult, self).__init__(stream, descriptions, 0)
self._per_test_output = verbosity > 0
def _print_status(self, tag, test):
if self._per_test_output:
test_id = test.id()
if test_id.startswith('__main__.'):
test_id = test_id[len('__main__.'):]
print('[%s] %s' % (tag, test_id), file=self.stream)
self.stream.flush()
def startTest(self, test):
super(TextTestResult, self).startTest(test)
self._print_status(' RUN ', test)
def addSuccess(self, test):
super(TextTestResult, self).addSuccess(test)
self._print_status(' OK ', test)
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
self._print_status(' FAILED ', test)
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
self._print_status(' FAILED ', test)
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
self._print_status(' SKIPPED ', test)
def addExpectedFailure(self, test, err):
super(TextTestResult, self).addExpectedFailure(test, err)
self._print_status(' OK ', test)
def addUnexpectedSuccess(self, test):
super(TextTestResult, self).addUnexpectedSuccess(test)
self._print_status(' FAILED ', test)
class TextTestRunner(unittest.TextTestRunner):
"""A test runner that produces formatted text results."""
_TEST_RESULT_CLASS = TextTestResult
# Set this to true at the class or instance level to run tests using a
# debug-friendly method (e.g., one that doesn't catch exceptions and interacts
# better with debuggers).
# Usually this is set using --pdb_post_mortem.
run_for_debugging = False
def run(self, test):
# type: (TestCase) -> TestResult
if self.run_for_debugging:
return self._run_debug(test)
else:
return super(TextTestRunner, self).run(test)
def _run_debug(self, test):
# type: (TestCase) -> TestResult
test.debug()
# Return an empty result to indicate success.
return self._makeResult()
def _makeResult(self):
return TextTestResult(self.stream, self.descriptions, self.verbosity)
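# Editor's note: a hedged sketch of wiring this runner into plain unittest
# (illustrative test class; absltest.main normally does this for you).
def _example_run_with_text_runner():
  import io
  import unittest

  class _SampleTest(unittest.TestCase):

    def test_passes(self):
      self.assertEqual(2 + 2, 4)

  suite = unittest.defaultTestLoader.loadTestsFromTestCase(_SampleTest)
  runner = TextTestRunner(stream=io.StringIO(), verbosity=1)
  result = runner.run(suite)
  assert result.wasSuccessful()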

File diff suppressed because it is too large

View File

@ -0,0 +1,386 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorator and context manager for saving and restoring flag values.
There are many ways to save and restore. Always use the most convenient method
for a given use case.
Here are examples of each method. They all call ``do_stuff()`` while
``FLAGS.someflag`` is temporarily set to ``'foo'``::
from absl.testing import flagsaver
# Use a decorator which can optionally override flags via arguments.
@flagsaver.flagsaver(someflag='foo')
def some_func():
do_stuff()
# Use a decorator which can optionally override flags with flagholders.
@flagsaver.flagsaver((module.FOO_FLAG, 'foo'), (other_mod.BAR_FLAG, 23))
def some_func():
do_stuff()
# Use a decorator which does not override flags itself.
@flagsaver.flagsaver
def some_func():
FLAGS.someflag = 'foo'
do_stuff()
# Use a context manager which can optionally override flags via arguments.
with flagsaver.flagsaver(someflag='foo'):
do_stuff()
# Save and restore the flag values yourself.
saved_flag_values = flagsaver.save_flag_values()
try:
FLAGS.someflag = 'foo'
do_stuff()
finally:
flagsaver.restore_flag_values(saved_flag_values)
# Use the parsing version to emulate users providing the flags.
# Note that all flags must be provided as strings (unparsed).
@flagsaver.as_parsed(some_int_flag='123')
def some_func():
# Because the flag was parsed it is considered "present".
assert FLAGS.some_int_flag.present
do_stuff()
# flagsaver.as_parsed() can also be used as a context manager just like
# flagsaver.flagsaver()
with flagsaver.as_parsed(some_int_flag='123'):
do_stuff()
# The flagsaver.as_parsed() interface also supports FlagHolder objects.
@flagsaver.as_parsed((module.FOO_FLAG, 'foo'), (other_mod.BAR_FLAG, '23'))
def some_func():
do_stuff()
# Using as_parsed with a multi_X flag requires a sequence of strings.
@flagsaver.as_parsed(some_multi_int_flag=['123', '456'])
def some_func():
assert FLAGS.some_multi_int_flag.present
do_stuff()
# If a flag name includes non-identifier characters it can be specified like
# so:
@flagsaver.as_parsed(**{'i-like-dashes': 'true'})
def some_func():
do_stuff()
We save and restore a shallow copy of each Flag object's ``__dict__`` attribute.
This preserves all attributes of the flag, such as whether or not it was
overridden from its default value.
WARNING: Currently a flag that is saved and then deleted cannot be restored. An
exception will be raised. However if you *add* a flag after saving flag values,
and then restore flag values, the added flag will be deleted with no errors.
"""
import collections
import functools
import inspect
from typing import overload, Any, Callable, Mapping, Tuple, TypeVar, Type, Sequence, Union
from absl import flags
FLAGS = flags.FLAGS
# The type of pre/post wrapped functions.
_CallableT = TypeVar('_CallableT', bound=Callable)
@overload
def flagsaver(*args: Tuple[flags.FlagHolder, Any],
**kwargs: Any) -> '_FlagOverrider':
...
@overload
def flagsaver(func: _CallableT) -> _CallableT:
...
def flagsaver(*args, **kwargs):
"""The main flagsaver interface. See module doc for usage."""
return _construct_overrider(_FlagOverrider, *args, **kwargs)
@overload
def as_parsed(*args: Tuple[flags.FlagHolder, Union[str, Sequence[str]]],
**kwargs: Union[str, Sequence[str]]) -> '_ParsingFlagOverrider':
...
@overload
def as_parsed(func: _CallableT) -> _CallableT:
...
def as_parsed(*args, **kwargs):
"""Overrides flags by parsing strings, saves flag state similar to flagsaver.
This function can be used as either a decorator or context manager similar to
flagsaver.flagsaver(). However, where flagsaver.flagsaver() directly sets the
flags to new values, this function will parse the provided arguments as if
they were provided on the command line. Among other things, this will cause
`FLAGS['flag_name'].present == True`.
A note on unparsed input: For many flag types, the unparsed version will be
a single string. However for multi_x (multi_string, multi_integer, multi_enum)
the unparsed version will be a Sequence of strings.
Args:
*args: Tuples of FlagHolders and their unparsed value.
**kwargs: The keyword args are flag names, and the values are unparsed
values.
Returns:
_ParsingFlagOverrider that serves as a context manager or decorator. Will
save previous flag state and parse new flags, then on cleanup it will
restore the previous flag state.
"""
return _construct_overrider(_ParsingFlagOverrider, *args, **kwargs)
# NOTE: the order of these overload declarations matters. The type checker will
# pick the first match which could be incorrect.
@overload
def _construct_overrider(
flag_overrider_cls: Type['_ParsingFlagOverrider'],
*args: Tuple[flags.FlagHolder, Union[str, Sequence[str]]],
**kwargs: Union[str, Sequence[str]]) -> '_ParsingFlagOverrider':
...
@overload
def _construct_overrider(flag_overrider_cls: Type['_FlagOverrider'],
*args: Tuple[flags.FlagHolder, Any],
**kwargs: Any) -> '_FlagOverrider':
...
@overload
def _construct_overrider(flag_overrider_cls: Type['_FlagOverrider'],
func: _CallableT) -> _CallableT:
...
def _construct_overrider(flag_overrider_cls, *args, **kwargs):
"""Handles the args/kwargs returning an instance of flag_overrider_cls.
If flag_overrider_cls is _FlagOverrider then values should be native Python
types matching the flags' types. Otherwise, if flag_overrider_cls is
_ParsingFlagOverrider, the values should be strings or sequences of strings.
Args:
flag_overrider_cls: The class that will do the overriding.
*args: Tuples of FlagHolder and the new flag value.
**kwargs: Keyword args mapping flag name to new flag value.
Returns:
A _FlagOverrider to be used as a decorator or context manager.
"""
if not args:
return flag_overrider_cls(**kwargs)
# args can be [func] if used as `@flagsaver` instead of `@flagsaver(...)`
if len(args) == 1 and callable(args[0]):
if kwargs:
raise ValueError(
"It's invalid to specify both positional and keyword parameters.")
func = args[0]
if inspect.isclass(func):
raise TypeError('@flagsaver.flagsaver cannot be applied to a class.')
return _wrap(flag_overrider_cls, func, {})
# args can be a list of (FlagHolder, value) pairs.
# In which case they augment any specified kwargs.
for arg in args:
if not isinstance(arg, tuple) or len(arg) != 2:
raise ValueError('Expected (FlagHolder, value) pair, found %r' % (arg,))
holder, value = arg
if not isinstance(holder, flags.FlagHolder):
raise ValueError('Expected (FlagHolder, value) pair, found %r' % (arg,))
if holder.name in kwargs:
raise ValueError('Cannot set --%s multiple times' % holder.name)
kwargs[holder.name] = value
return flag_overrider_cls(**kwargs)
def save_flag_values(
flag_values: flags.FlagValues = FLAGS) -> Mapping[str, Mapping[str, Any]]:
"""Returns copy of flag values as a dict.
Args:
flag_values: FlagValues, the FlagValues instance with which the flag will be
saved. This should almost never need to be overridden.
Returns:
Dictionary mapping keys to values. Keys are flag names, values are
corresponding ``__dict__`` members. E.g. ``{'key': value_dict, ...}``.
"""
return {name: _copy_flag_dict(flag_values[name]) for name in flag_values}
def restore_flag_values(saved_flag_values: Mapping[str, Mapping[str, Any]],
flag_values: flags.FlagValues = FLAGS):
"""Restores flag values based on the dictionary of flag values.
Args:
saved_flag_values: {'flag_name': value_dict, ...}
flag_values: FlagValues, the FlagValues instance from which the flag will be
restored. This should almost never need to be overridden.
"""
new_flag_names = list(flag_values)
for name in new_flag_names:
saved = saved_flag_values.get(name)
if saved is None:
# If __dict__ was not saved, delete the "new" flag.
delattr(flag_values, name)
else:
if flag_values[name].value != saved['_value']:
flag_values[name].value = saved['_value'] # Ensure C++ value is set.
flag_values[name].__dict__ = saved
@overload
def _wrap(flag_overrider_cls: Type['_FlagOverrider'], func: _CallableT,
overrides: Mapping[str, Any]) -> _CallableT:
...
@overload
def _wrap(flag_overrider_cls: Type['_ParsingFlagOverrider'], func: _CallableT,
overrides: Mapping[str, Union[str, Sequence[str]]]) -> _CallableT:
...
def _wrap(flag_overrider_cls, func, overrides):
"""Creates a wrapper function that saves/restores flag values.
Args:
flag_overrider_cls: The class that will be used as a context manager.
func: This will be called between saving flags and restoring flags.
overrides: Flag names mapped to their values. These flags will be set after
saving the original flag state. The type of the values depends on if
_FlagOverrider or _ParsingFlagOverrider was specified.
Returns:
A wrapped version of func.
"""
@functools.wraps(func)
def _flagsaver_wrapper(*args, **kwargs):
"""Wrapper function that saves and restores flags."""
with flag_overrider_cls(**overrides):
return func(*args, **kwargs)
return _flagsaver_wrapper
class _FlagOverrider(object):
"""Overrides flags for the duration of the decorated function call.
It also restores all original values of flags after decorated method
completes.
"""
def __init__(self, **overrides: Any):
self._overrides = overrides
self._saved_flag_values = None
def __call__(self, func: _CallableT) -> _CallableT:
if inspect.isclass(func):
raise TypeError('flagsaver cannot be applied to a class.')
return _wrap(self.__class__, func, self._overrides)
def __enter__(self):
self._saved_flag_values = save_flag_values(FLAGS)
try:
FLAGS._set_attributes(**self._overrides)
except:
# It may fail because of flag validators.
restore_flag_values(self._saved_flag_values, FLAGS)
raise
def __exit__(self, exc_type, exc_value, traceback):
restore_flag_values(self._saved_flag_values, FLAGS)
class _ParsingFlagOverrider(_FlagOverrider):
"""Context manager for overriding flags.
Simulates command line parsing.
This is similar to _FlagOverrider except that all **overrides should be
strings or sequences of strings, and when the context is entered this class
calls ``.parse(value)``.
This results in the flags having .present set properly.
"""
def __init__(self, **overrides: Union[str, Sequence[str]]):
for flag_name, new_value in overrides.items():
if isinstance(new_value, str):
continue
if (isinstance(new_value, collections.abc.Sequence) and
all(isinstance(single_value, str) for single_value in new_value)):
continue
raise TypeError(
f'flagsaver.as_parsed() cannot parse {flag_name}. Expected a single '
f'string or sequence of strings but {type(new_value)} was provided.')
super().__init__(**overrides)
def __enter__(self):
self._saved_flag_values = save_flag_values(FLAGS)
try:
for flag_name, unparsed_value in self._overrides.items():
# LINT.IfChange(flag_override_parsing)
FLAGS[flag_name].parse(unparsed_value)
FLAGS[flag_name].using_default_value = False
# LINT.ThenChange()
# Perform the validation on all modified flags. This is something that
# FLAGS._set_attributes() does for you in _FlagOverrider.
for flag_name in self._overrides:
FLAGS._assert_validators(FLAGS[flag_name].validators)
except KeyError as e:
# If a flag doesn't exist, an UnrecognizedFlagError is more specific.
restore_flag_values(self._saved_flag_values, FLAGS)
raise flags.UnrecognizedFlagError('Unknown command line flag.') from e
except:
# It may fail because of flag validators or general parsing issues.
restore_flag_values(self._saved_flag_values, FLAGS)
raise
def _copy_flag_dict(flag: flags.Flag) -> Mapping[str, Any]:
"""Returns a copy of the flag object's ``__dict__``.
It's mostly a shallow copy of the ``__dict__``, except it also does a shallow
copy of the validator list.
Args:
flag: flags.Flag, the flag to copy.
Returns:
A copy of the flag object's ``__dict__``.
"""
copy = flag.__dict__.copy()
copy['_value'] = flag.value # Ensure correct restore for C++ flags.
copy['validators'] = list(flag.validators)
return copy

View File

@ -0,0 +1,724 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds support for parameterized tests to Python's unittest TestCase class.
A parameterized test is a method in a test case that is invoked with different
argument tuples.
A simple example::
class AdditionExample(parameterized.TestCase):
@parameterized.parameters(
(1, 2, 3),
(4, 5, 9),
(1, 1, 3))
def testAddition(self, op1, op2, result):
self.assertEqual(result, op1 + op2)
Each invocation is a separate test case and properly isolated just
like a normal test method, with its own setUp/tearDown cycle. In the
example above, there are three separate testcases, one of which will
fail due to an assertion error (1 + 1 != 3).
Parameters for individual test cases can be tuples (with positional parameters)
or dictionaries (with named parameters)::
class AdditionExample(parameterized.TestCase):
@parameterized.parameters(
{'op1': 1, 'op2': 2, 'result': 3},
{'op1': 4, 'op2': 5, 'result': 9},
)
def testAddition(self, op1, op2, result):
self.assertEqual(result, op1 + op2)
If a parameterized test fails, the error message will show the
original test name and the parameters for that test.
The id method of the test, used internally by the unittest framework, is also
modified to show the arguments (but note that the name reported by `id()`
doesn't match the actual test name, see below). To make sure that test names
stay the same across several invocations, object representations like::
>>> class Foo(object):
... pass
>>> repr(Foo())
'<__main__.Foo object at 0x23d8610>'
are turned into ``__main__.Foo``. When selecting a subset of test cases to run
on the command-line, the test cases contain an index suffix for each argument
in the order they were passed to :func:`parameters` (e.g. testAddition0,
testAddition1, etc.) This naming scheme is subject to change; for more reliable
and stable names, especially in test logs, use :func:`named_parameters` instead.
Tests using :func:`named_parameters` are similar to :func:`parameters`, except
only tuples or dicts of args are supported. For tuples, the first parameter arg
has to be a string (or an object that returns an apt name when converted via
``str()``). For dicts, a value for the key ``testcase_name`` must be present and
must be a string (or an object that returns an apt name when converted via
``str()``)::
class NamedExample(parameterized.TestCase):
@parameterized.named_parameters(
('Normal', 'aa', 'aaa', True),
('EmptyPrefix', '', 'abc', True),
('BothEmpty', '', '', True))
def testStartsWith(self, prefix, string, result):
self.assertEqual(result, string.startswith(prefix))
class NamedExample(parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': 'Normal',
'result': True, 'string': 'aaa', 'prefix': 'aa'},
{'testcase_name': 'EmptyPrefix',
'result': True, 'string': 'abc', 'prefix': ''},
{'testcase_name': 'BothEmpty',
'result': True, 'string': '', 'prefix': ''})
def testStartsWith(self, prefix, string, result):
self.assertEqual(result, string.startswith(prefix))
Named tests also have the benefit that they can be run individually
from the command line::
$ testmodule.py NamedExample.testStartsWithNormal
.
--------------------------------------------------------------------
Ran 1 test in 0.000s
OK
Parameterized Classes
=====================
If invocation arguments are shared across test methods in a single
TestCase class, instead of decorating all test methods
individually, the class itself can be decorated::
@parameterized.parameters(
(1, 2, 3),
(4, 5, 9))
class ArithmeticTest(parameterized.TestCase):
def testAdd(self, arg1, arg2, result):
self.assertEqual(arg1 + arg2, result)
def testSubtract(self, arg1, arg2, result):
self.assertEqual(result - arg1, arg2)
Inputs from Iterables
=====================
If parameters should be shared across several test cases, or are dynamically
created from other sources, a single non-tuple iterable can be passed into
the decorator. This iterable will be used to obtain the test cases::
class AdditionExample(parameterized.TestCase):
@parameterized.parameters(
(c.op1, c.op2, c.result) for c in testcases
)
def testAddition(self, op1, op2, result):
self.assertEqual(result, op1 + op2)
Single-Argument Test Methods
============================
If a test method takes only one argument, the single argument must not be
wrapped into a tuple::
class NegativeNumberExample(parameterized.TestCase):
@parameterized.parameters(
-1, -3, -4, -5
)
def testIsNegative(self, arg):
self.assertTrue(IsNegative(arg))
List/tuple as a Single Argument
===============================
If a test method takes a single argument of a list/tuple, it must be wrapped
inside a tuple::
class ZeroSumExample(parameterized.TestCase):
@parameterized.parameters(
([-1, 0, 1], ),
([-2, 0, 2], ),
)
def testSumIsZero(self, arg):
self.assertEqual(0, sum(arg))
Cartesian product of Parameter Values as Parameterized Test Cases
=================================================================
To run a test method over a cartesian product of parameter values,
`parameterized.product` may be used to generate the test combinations::
class TestModuloExample(parameterized.TestCase):
@parameterized.product(
num=[0, 20, 80],
modulo=[2, 4],
expected=[0]
)
def testModuloResult(self, num, modulo, expected):
self.assertEqual(expected, num % modulo)
This results in 6 test cases being created - one for each combination of the
parameters. It is also possible to supply sequences of keyword argument dicts
as elements of the cartesian product::
@parameterized.product(
(dict(num=5, modulo=3, expected=2),
dict(num=7, modulo=4, expected=3)),
dtype=(int, float)
)
def testModuloResult(self, num, modulo, expected, dtype):
self.assertEqual(expected, dtype(num) % modulo)
This results in 4 test cases being created - for each of the two sets of test
data (supplied as kwarg dicts) and for each of the two data types (supplied as
a named parameter). Multiple keyword argument dicts may be supplied if required.
Async Support
=============
If a test needs to call async functions, it can inherit from both
parameterized.TestCase and another TestCase that supports async calls, such
as [asynctest](https://github.com/Martiusweb/asynctest)::
import asynctest
class AsyncExample(parameterized.TestCase, asynctest.TestCase):
@parameterized.parameters(
('a', 1),
('b', 2),
)
async def testSomeAsyncFunction(self, arg, expected):
actual = await someAsyncFunction(arg)
self.assertEqual(actual, expected)
"""
from collections import abc
import functools
import inspect
import itertools
import re
import types
import unittest
import warnings
from absl.testing import absltest
_ADDR_RE = re.compile(r'\<([a-zA-Z0-9_\-\.]+) object at 0x[a-fA-F0-9]+\>')
_NAMED = object()
_ARGUMENT_REPR = object()
_NAMED_DICT_KEY = 'testcase_name'
class NoTestsError(Exception):
"""Raised when parameterized decorators do not generate any tests."""
class DuplicateTestNameError(Exception):
"""Raised when a parameterized test has the same test name multiple times."""
def __init__(self, test_class_name, new_test_name, original_test_name):
super(DuplicateTestNameError, self).__init__(
'Duplicate parameterized test name in {}: generated test name {!r} '
'(generated from {!r}) already exists. Consider using '
'named_parameters() to give your tests unique names and/or renaming '
'the conflicting test method.'.format(
test_class_name, new_test_name, original_test_name))
def _clean_repr(obj):
return _ADDR_RE.sub(r'<\1>', repr(obj))
def _non_string_or_bytes_iterable(obj):
return (isinstance(obj, abc.Iterable) and not isinstance(obj, str) and
not isinstance(obj, bytes))
def _format_parameter_list(testcase_params):
if isinstance(testcase_params, abc.Mapping):
return ', '.join('%s=%s' % (argname, _clean_repr(value))
for argname, value in testcase_params.items())
elif _non_string_or_bytes_iterable(testcase_params):
return ', '.join(map(_clean_repr, testcase_params))
else:
return _format_parameter_list((testcase_params,))
def _async_wrapped(func):
@functools.wraps(func)
async def wrapper(*args, **kwargs):
return await func(*args, **kwargs)
return wrapper
class _ParameterizedTestIter(object):
"""Callable and iterable class for producing new test cases."""
def __init__(self, test_method, testcases, naming_type, original_name=None):
"""Returns concrete test functions for a test and a list of parameters.
The naming_type is used to determine the name of the concrete
functions as reported by the unittest framework. If naming_type is
_FIRST_ARG, the testcases must be tuples, and the first element must
have a string representation that is a valid Python identifier.
Args:
test_method: The decorated test method.
testcases: (list of tuple/dict) A list of parameter tuples/dicts for
individual test invocations.
naming_type: The test naming type, either _NAMED or _ARGUMENT_REPR.
original_name: The original test method name. When decorated on a test
method, None is passed to __init__ and test_method.__name__ is used.
Note that test_method.__name__ might differ from the originally defined
test method name because of the use of other decorators. A more accurate
value is set by TestGeneratorMetaclass.__new__ later.
"""
self._test_method = test_method
self.testcases = testcases
self._naming_type = naming_type
if original_name is None:
original_name = test_method.__name__
self._original_name = original_name
self.__name__ = _ParameterizedTestIter.__name__
def __call__(self, *args, **kwargs):
raise RuntimeError('You appear to be running a parameterized test case '
'without having inherited from parameterized.'
'TestCase. This is bad because none of '
'your test cases are actually being run. You may also '
'be using another decorator before the parameterized '
'one, in which case you should reverse the order.')
def __iter__(self):
test_method = self._test_method
naming_type = self._naming_type
def make_bound_param_test(testcase_params):
@functools.wraps(test_method)
def bound_param_test(self):
if isinstance(testcase_params, abc.Mapping):
return test_method(self, **testcase_params)
elif _non_string_or_bytes_iterable(testcase_params):
return test_method(self, *testcase_params)
else:
return test_method(self, testcase_params)
if naming_type is _NAMED:
# Signal the metaclass that the name of the test function is unique
# and descriptive.
bound_param_test.__x_use_name__ = True
testcase_name = None
if isinstance(testcase_params, abc.Mapping):
if _NAMED_DICT_KEY not in testcase_params:
raise RuntimeError(
'Dict for named tests must contain key "%s"' % _NAMED_DICT_KEY)
# Create a new dict to avoid modifying the supplied testcase_params.
testcase_name = testcase_params[_NAMED_DICT_KEY]
testcase_params = {
k: v for k, v in testcase_params.items() if k != _NAMED_DICT_KEY
}
elif _non_string_or_bytes_iterable(testcase_params):
if not isinstance(testcase_params[0], str):
raise RuntimeError(
'The first element of named test parameters is the test name '
'suffix and must be a string')
testcase_name = testcase_params[0]
testcase_params = testcase_params[1:]
else:
raise RuntimeError(
'Named tests must be passed a dict or non-string iterable.')
test_method_name = self._original_name
# Support PEP-8 underscore style for test naming if used.
if (test_method_name.startswith('test_')
and testcase_name
and not testcase_name.startswith('_')):
test_method_name += '_'
bound_param_test.__name__ = test_method_name + str(testcase_name)
elif naming_type is _ARGUMENT_REPR:
# If it's a generator, convert it to a tuple and treat them as
# parameters.
if isinstance(testcase_params, types.GeneratorType):
testcase_params = tuple(testcase_params)
# The metaclass creates a unique, but non-descriptive method name for
# _ARGUMENT_REPR tests using an indexed suffix.
# To keep test names descriptive, only the original method name is used.
# To make sure test names are unique, we add a unique descriptive suffix
# __x_params_repr__ for every test.
params_repr = '(%s)' % (_format_parameter_list(testcase_params),)
bound_param_test.__x_params_repr__ = params_repr
else:
raise RuntimeError('%s is not a valid naming type.' % (naming_type,))
bound_param_test.__doc__ = '%s(%s)' % (
bound_param_test.__name__, _format_parameter_list(testcase_params))
if test_method.__doc__:
bound_param_test.__doc__ += '\n%s' % (test_method.__doc__,)
if inspect.iscoroutinefunction(test_method):
return _async_wrapped(bound_param_test)
return bound_param_test
return (make_bound_param_test(c) for c in self.testcases)
def _modify_class(class_object, testcases, naming_type):
assert not getattr(class_object, '_test_params_reprs', None), (
'Cannot add parameters to %s. Either it already has parameterized '
'methods, or its super class is also a parameterized class.' % (
class_object,))
# NOTE: _test_params_reprs is private to parameterized.TestCase and its
# metaclass; do not use it outside of those classes.
class_object._test_params_reprs = test_params_reprs = {}
for name, obj in class_object.__dict__.copy().items():
if (name.startswith(unittest.TestLoader.testMethodPrefix)
and isinstance(obj, types.FunctionType)):
delattr(class_object, name)
methods = {}
_update_class_dict_for_param_test_case(
class_object.__name__, methods, test_params_reprs, name,
_ParameterizedTestIter(obj, testcases, naming_type, name))
for meth_name, meth in methods.items():
setattr(class_object, meth_name, meth)
def _parameter_decorator(naming_type, testcases):
"""Implementation of the parameterization decorators.
Args:
naming_type: The naming type.
testcases: Testcase parameters.
Raises:
NoTestsError: Raised when the decorator generates no tests.
Returns:
A function for modifying the decorated object.
"""
def _apply(obj):
if isinstance(obj, type):
_modify_class(obj, testcases, naming_type)
return obj
else:
return _ParameterizedTestIter(obj, testcases, naming_type)
if (len(testcases) == 1 and
not isinstance(testcases[0], tuple) and
not isinstance(testcases[0], abc.Mapping)):
# Support using a single non-tuple parameter as a list of test cases.
# Note that the single non-tuple parameter can't be a Mapping either, since
# that would be the single dict parameter case.
assert _non_string_or_bytes_iterable(testcases[0]), (
'Single parameter argument must be a non-string non-Mapping iterable')
testcases = testcases[0]
if not isinstance(testcases, abc.Sequence):
testcases = list(testcases)
if not testcases:
raise NoTestsError(
'parameterized test decorators did not generate any tests. '
'Make sure you specify non-empty parameters, '
'and do not reuse generators more than once.')
return _apply
def parameters(*testcases):
"""A decorator for creating parameterized tests.
See the module docstring for a usage example.
Args:
*testcases: Parameters for the decorated method, either a single
iterable, or a list of tuples/dicts/objects (for tests with only one
argument).
Raises:
NoTestsError: Raised when the decorator generates no tests.
Returns:
A test generator to be handled by TestGeneratorMetaclass.
"""
return _parameter_decorator(_ARGUMENT_REPR, testcases)
def named_parameters(*testcases):
"""A decorator for creating parameterized tests.
See the module docstring for a usage example. For every parameter tuple
passed, the first element of the tuple should be a string and will be appended
to the name of the test method. Each parameter dict passed must have a value
for the key "testcase_name", the string representation of that value will be
appended to the name of the test method.
Args:
*testcases: Parameters for the decorated method, either a single iterable,
or a list of tuples or dicts.
Raises:
NoTestsError: Raised when the decorator generates no tests.
Returns:
A test generator to be handled by TestGeneratorMetaclass.
"""
return _parameter_decorator(_NAMED, testcases)
def product(*kwargs_seqs, **testgrid):
"""A decorator for running tests over cartesian product of parameters values.
See the module docstring for a usage example. The test will be run for every
possible combination of the parameters.
Args:
*kwargs_seqs: Each positional parameter is a sequence of keyword arg dicts;
every test case generated will include exactly one kwargs dict from each
positional parameter; these will then be merged to form an overall list
of arguments for the test case.
**testgrid: A mapping of parameter names and their possible values. Possible
values should be given as either a list or a tuple.
Raises:
NoTestsError: Raised when the decorator generates no tests.
Returns:
A test generator to be handled by TestGeneratorMetaclass.
"""
for name, values in testgrid.items():
assert isinstance(values, (list, tuple)), (
'Values of {} must be given as list or tuple, found {}'.format(
name, type(values)))
prior_arg_names = set()
for kwargs_seq in kwargs_seqs:
assert ((isinstance(kwargs_seq, (list, tuple))) and
all(isinstance(kwargs, dict) for kwargs in kwargs_seq)), (
'Positional parameters must be a sequence of keyword arg '
'dicts, found {}'
.format(kwargs_seq))
if kwargs_seq:
arg_names = set(kwargs_seq[0])
assert all(set(kwargs) == arg_names for kwargs in kwargs_seq), (
'Keyword argument dicts within a single parameter must all have the '
'same keys, found {}'.format(kwargs_seq))
assert not (arg_names & prior_arg_names), (
'Keyword argument dict sequences must all have distinct argument '
'names, found duplicate(s) {}'
.format(sorted(arg_names & prior_arg_names)))
prior_arg_names |= arg_names
assert not (prior_arg_names & set(testgrid)), (
'Arguments supplied in kwargs dicts in positional parameters must not '
'overlap with arguments supplied as named parameters; found duplicate '
'argument(s) {}'.format(sorted(prior_arg_names & set(testgrid))))
# Convert testgrid into a sequence of sequences of kwargs dicts and combine
# with the positional parameters.
# So foo=[1,2], bar=[3,4] --> [[{foo: 1}, {foo: 2}], [{bar: 3}, {bar: 4}]]
testgrid = (tuple({k: v} for v in vs) for k, vs in testgrid.items())
testgrid = tuple(kwargs_seqs) + tuple(testgrid)
# Create all possible combinations of parameters as a cartesian product
# of parameter values.
testcases = [
dict(itertools.chain.from_iterable(case.items()
for case in cases))
for cases in itertools.product(*testgrid)
]
return _parameter_decorator(_ARGUMENT_REPR, testcases)
class TestGeneratorMetaclass(type):
"""Metaclass for adding tests generated by parameterized decorators."""
def __new__(cls, class_name, bases, dct):
# NOTE: _test_params_reprs is private to parameterized.TestCase and its
# metaclass; do not use it outside of those classes.
test_params_reprs = dct.setdefault('_test_params_reprs', {})
for name, obj in dct.copy().items():
if (name.startswith(unittest.TestLoader.testMethodPrefix) and
_non_string_or_bytes_iterable(obj)):
# NOTE: `obj` might not be a _ParameterizedTestIter in two cases:
# 1. a class-level iterable named test* that isn't a test, such as
# a list of something. Such attributes get deleted from the class.
#
# 2. If a decorator is applied to the parameterized test, e.g.
# @morestuff
# @parameterized.parameters(...)
# def test_foo(...): ...
#
# This is OK so long as the underlying parameterized function state
# is forwarded (e.g. using functools.wraps()) and **without**
# explicitly accessing the internal attributes.
if isinstance(obj, _ParameterizedTestIter):
# Update the original test method name so it's more accurate.
# The mismatch might happen when another decorator is used inside
# the parameterized decorators, and the inner decorator doesn't
# preserve its __name__.
obj._original_name = name
iterator = iter(obj)
dct.pop(name)
_update_class_dict_for_param_test_case(
class_name, dct, test_params_reprs, name, iterator)
# If the base class is a subclass of parameterized.TestCase, inherit its
# _test_params_reprs too.
for base in bases:
# Check if the base has _test_params_reprs first, then check if it's a
# subclass of parameterized.TestCase. Otherwise, when this is called for
# the parameterized.TestCase definition itself, this raises because
# TestCase itself is not defined yet. This works as long as absltest.TestCase does
# not define _test_params_reprs.
base_test_params_reprs = getattr(base, '_test_params_reprs', None)
if base_test_params_reprs and issubclass(base, TestCase):
for test_method, test_method_id in base_test_params_reprs.items():
# test_method may exist in both the base class and this class.
# This class's method overrides the base class's, so the base entry is
# only inherited if it does not already exist here.
test_params_reprs.setdefault(test_method, test_method_id)
return type.__new__(cls, class_name, bases, dct)
def _update_class_dict_for_param_test_case(
test_class_name, dct, test_params_reprs, name, iterator):
"""Adds individual test cases to a dictionary.
Args:
test_class_name: The name of the class tests are added to.
dct: The target dictionary.
test_params_reprs: The dictionary for mapping names to test IDs.
name: The original name of the test case.
iterator: The iterator generating the individual test cases.
Raises:
DuplicateTestNameError: Raised when a test name occurs multiple times.
RuntimeError: If non-parameterized functions are generated.
"""
for idx, func in enumerate(iterator):
assert callable(func), 'Test generators must yield callables, got %r' % (
func,)
if not (getattr(func, '__x_use_name__', None) or
getattr(func, '__x_params_repr__', None)):
raise RuntimeError(
'{}.{} generated a test function without using the parameterized '
'decorators. Only tests generated using the decorators are '
'supported.'.format(test_class_name, name))
if getattr(func, '__x_use_name__', False):
original_name = func.__name__
new_name = original_name
else:
original_name = name
new_name = '%s%d' % (original_name, idx)
if new_name in dct:
raise DuplicateTestNameError(test_class_name, new_name, original_name)
dct[new_name] = func
test_params_reprs[new_name] = getattr(func, '__x_params_repr__', '')
class TestCase(absltest.TestCase, metaclass=TestGeneratorMetaclass):
"""Base class for test cases using the parameters decorator."""
# visibility: private; do not call outside this class.
def _get_params_repr(self):
return self._test_params_reprs.get(self._testMethodName, '')
def __str__(self):
params_repr = self._get_params_repr()
if params_repr:
params_repr = ' ' + params_repr
return '{}{} ({})'.format(
self._testMethodName, params_repr,
unittest.util.strclass(self.__class__))
def id(self):
"""Returns the descriptive ID of the test.
This is used internally by the unittest framework to get a name
for the test to be used in reports.
Returns:
The test id.
"""
base = super(TestCase, self).id()
params_repr = self._get_params_repr()
if params_repr:
# We include the params in the id so that, when reported in the
# test.xml file, the value is more informative than just "test_foo0".
# Use a space to separate them so that it's copy/paste friendly and
# easy to identify the actual test id.
return '{} {}'.format(base, params_repr)
else:
return base
# This function is kept CamelCase because it's used as a class's base class.
def CoopTestCase(other_base_class): # pylint: disable=invalid-name
"""Returns a new base class with a cooperative metaclass base.
This enables the TestCase to be used in combination
with other base classes that have custom metaclasses, such as
``mox.MoxTestBase``.
Only works with metaclasses that do not override ``type.__new__``.
Example::
from absl.testing import parameterized
class ExampleTest(parameterized.CoopTestCase(OtherTestCase)):
...
Args:
other_base_class: (class) A test case base class.
Returns:
A new class object.
"""
# If the other base class has a metaclass of 'type' then trying to combine
# the metaclasses will result in an MRO error. So simply combine them and
# return.
if type(other_base_class) == type: # pylint: disable=unidiomatic-typecheck
warnings.warn(
'CoopTestCase is only necessary when combining with a class that uses'
' a metaclass. Use multiple inheritance like this instead: class'
f' ExampleTest(parameterized.TestCase, {other_base_class.__name__}):',
stacklevel=2,
)
class CoopTestCaseBase(other_base_class, TestCase):
pass
return CoopTestCaseBase
else:
class CoopMetaclass(type(other_base_class), TestGeneratorMetaclass): # pylint: disable=unused-variable
pass
class CoopTestCaseBase(other_base_class, TestCase, metaclass=CoopMetaclass):
pass
return CoopTestCaseBase
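
As a quick orientation to the naming rules implemented above, a minimal sketch (not part of the vendored file) of ``named_parameters`` with PEP-8 style method names; per the underscore-joining rule, the generated tests would be named ``test_addition_small_numbers`` and ``test_addition_large_numbers``.

```python
# Hedged sketch: class, method, and parameter names are illustrative.
from absl.testing import parameterized


class AdditionTest(parameterized.TestCase):

  @parameterized.named_parameters(
      ('small_numbers', 1, 2, 3),
      ('large_numbers', 1000, 2000, 3000),
  )
  def test_addition(self, a, b, expected):
    # Each named tuple above becomes its own isolated test case.
    self.assertEqual(expected, a + b)
```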

View File

@ -0,0 +1,563 @@
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Python test reporter that generates test reports in JUnit XML format."""
import datetime
import re
import sys
import threading
import time
import traceback
import unittest
from xml.sax import saxutils
from absl.testing import _pretty_print_reporter
# See http://www.w3.org/TR/REC-xml/#NT-Char
_bad_control_character_codes = set(range(0, 0x20)) - {0x9, 0xA, 0xD}
_control_character_conversions = {
chr(i): '\\x{:02x}'.format(i) for i in _bad_control_character_codes}
_escape_xml_attr_conversions = {
'"': '&quot;',
"'": '&apos;',
'\n': '&#xA;',
'\t': '&#x9;',
'\r': '&#xD;',
' ': '&#x20;'}
_escape_xml_attr_conversions.update(_control_character_conversions)
# When class or module level function fails, unittest/suite.py adds a
# _ErrorHolder instance instead of a real TestCase, and it has a description
# like "setUpClass (__main__.MyTestCase)".
_CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX = re.compile(r'^(\w+) \((\S+)\)$')
# NOTE: while saxutils.quoteattr() theoretically does the same thing, it
# often ends up being too smart for its own good, not escaping properly.
# This function is much more reliable.
def _escape_xml_attr(content):
"""Escapes xml attributes."""
# Note: saxutils doesn't escape the quotes.
return saxutils.escape(content, _escape_xml_attr_conversions)
def _escape_cdata(s):
"""Escapes a string to be used as XML CDATA.
CDATA characters are treated strictly as character data, not as XML markup,
but there are still certain restrictions on them.
Args:
s: the string to be escaped.
Returns:
An escaped version of the input string.
"""
for char, escaped in _control_character_conversions.items():
s = s.replace(char, escaped)
return s.replace(']]>', ']] >')
def _iso8601_timestamp(timestamp):
"""Produces an ISO8601 datetime.
Args:
timestamp: an Epoch based timestamp in seconds.
Returns:
An ISO 8601 format timestamp if the input is a valid timestamp, None otherwise.
"""
if timestamp is None or timestamp < 0:
return None
return datetime.datetime.fromtimestamp(
timestamp, tz=datetime.timezone.utc).isoformat()
def _print_xml_element_header(element, attributes, stream, indentation=''):
"""Prints an XML header of an arbitrary element.
Args:
element: element name (testsuites, testsuite, testcase)
attributes: 2-tuple list with (attributes, values) already escaped
stream: output stream to write test report XML to
indentation: indentation added to the element header
"""
stream.write('%s<%s' % (indentation, element))
for attribute in attributes:
if (len(attribute) == 2 and attribute[0] is not None and
attribute[1] is not None):
stream.write(' %s="%s"' % (attribute[0], attribute[1]))
stream.write('>\n')
# Copy time.time which ensures the real time is used internally.
# This prevents bad interactions with tests that stub out time.
_time_copy = time.time
if hasattr(traceback, '_some_str'):
# Use the traceback module str function to format safely.
_safe_str = traceback._some_str
else:
_safe_str = str # pylint: disable=invalid-name
class _TestCaseResult(object):
"""Private helper for _TextAndXMLTestResult that represents a test result.
Attributes:
test: A TestCase instance of an individual test method.
name: The name of the individual test method.
full_class_name: The full name of the test class.
run_time: The duration (in seconds) it took to run the test.
start_time: Epoch relative timestamp of when test started (in seconds)
errors: A list of error 4-tuples. Error tuple entries are
1) a string identifier of either "failure" or "error"
2) an exception_type
3) an exception_message
4) a string version of a sys.exc_info()-style tuple of values
('error', err[0], err[1], self._exc_info_to_string(err))
If the length of errors is 0, then the test is either passed or
skipped.
skip_reason: A string explaining why the test was skipped.
"""
def __init__(self, test):
self.run_time = -1
self.start_time = -1
self.skip_reason = None
self.errors = []
self.test = test
# Parse the test id to get its test name and full class path.
# Unfortunately there is no better way of knowing the test and class.
# Worse, unittest uses _ErrorHolder instances to represent class / module
# level failures.
test_desc = test.id() or str(test)
# Check if it's something like "setUpClass (__main__.TestCase)".
match = _CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX.match(test_desc)
if match:
name = match.group(1)
full_class_name = match.group(2)
else:
class_name = unittest.util.strclass(test.__class__)
if isinstance(test, unittest.case._SubTest):
# If the test case is a _SubTest, the real TestCase instance is
# available as _SubTest.test_case.
class_name = unittest.util.strclass(test.test_case.__class__)
if test_desc.startswith(class_name + '.'):
# In a typical unittest.TestCase scenario, test.id() returns with
# a class name formatted using unittest.util.strclass.
name = test_desc[len(class_name)+1:]
full_class_name = class_name
else:
# Otherwise make a best effort to guess the test name and full class
# path.
parts = test_desc.rsplit('.', 1)
name = parts[-1]
full_class_name = parts[0] if len(parts) == 2 else ''
self.name = _escape_xml_attr(name)
self.full_class_name = _escape_xml_attr(full_class_name)
def set_run_time(self, time_in_secs):
self.run_time = time_in_secs
def set_start_time(self, time_in_secs):
self.start_time = time_in_secs
def print_xml_summary(self, stream):
"""Prints an XML Summary of a TestCase.
Status and result are populated as per JUnit XML test result reporter.
A test that has been skipped will always have a skip reason,
as every skip method in Python's unittest requires the reason arg to be
passed.
Args:
stream: output stream to write test report XML to
"""
if self.skip_reason is None:
status = 'run'
result = 'completed'
else:
status = 'notrun'
result = 'suppressed'
test_case_attributes = [
('name', '%s' % self.name),
('status', '%s' % status),
('result', '%s' % result),
('time', '%.3f' % self.run_time),
('classname', self.full_class_name),
('timestamp', _iso8601_timestamp(self.start_time)),
]
_print_xml_element_header('testcase', test_case_attributes, stream, ' ')
self._print_testcase_details(stream)
stream.write(' </testcase>\n')
def _print_testcase_details(self, stream):
for error in self.errors:
outcome, exception_type, message, error_msg = error # pylint: disable=unpacking-non-sequence
message = _escape_xml_attr(_safe_str(message))
exception_type = _escape_xml_attr(str(exception_type))
error_msg = _escape_cdata(error_msg)
stream.write(' <%s message="%s" type="%s"><![CDATA[%s]]></%s>\n'
% (outcome, message, exception_type, error_msg, outcome))
class _TestSuiteResult(object):
"""Private helper for _TextAndXMLTestResult."""
def __init__(self):
self.suites = {}
self.failure_counts = {}
self.error_counts = {}
self.overall_start_time = -1
self.overall_end_time = -1
self._testsuites_properties = {}
def add_test_case_result(self, test_case_result):
suite_name = type(test_case_result.test).__name__
if suite_name == '_ErrorHolder':
# _ErrorHolder is a special case created by unittest for class / module
# level functions.
suite_name = test_case_result.full_class_name.rsplit('.')[-1]
if isinstance(test_case_result.test, unittest.case._SubTest):
# If the test case is a _SubTest, the real TestCase instance is
# available as _SubTest.test_case.
suite_name = type(test_case_result.test.test_case).__name__
self._setup_test_suite(suite_name)
self.suites[suite_name].append(test_case_result)
for error in test_case_result.errors:
# Only count the first failure or error so that the sum is equal to the
# total number of *testcases* that have failures or errors.
if error[0] == 'failure':
self.failure_counts[suite_name] += 1
break
elif error[0] == 'error':
self.error_counts[suite_name] += 1
break
def print_xml_summary(self, stream):
overall_test_count = sum(len(x) for x in self.suites.values())
overall_failures = sum(self.failure_counts.values())
overall_errors = sum(self.error_counts.values())
overall_attributes = [
('name', ''),
('tests', '%d' % overall_test_count),
('failures', '%d' % overall_failures),
('errors', '%d' % overall_errors),
('time', '%.3f' % (self.overall_end_time - self.overall_start_time)),
('timestamp', _iso8601_timestamp(self.overall_start_time)),
]
_print_xml_element_header('testsuites', overall_attributes, stream)
if self._testsuites_properties:
stream.write(' <properties>\n')
for name, value in sorted(self._testsuites_properties.items()):
stream.write(' <property name="%s" value="%s"></property>\n' %
(_escape_xml_attr(name), _escape_xml_attr(str(value))))
stream.write(' </properties>\n')
for suite_name in self.suites:
suite = self.suites[suite_name]
suite_end_time = max(x.start_time + x.run_time for x in suite)
suite_start_time = min(x.start_time for x in suite)
failures = self.failure_counts[suite_name]
errors = self.error_counts[suite_name]
suite_attributes = [
('name', '%s' % suite_name),
('tests', '%d' % len(suite)),
('failures', '%d' % failures),
('errors', '%d' % errors),
('time', '%.3f' % (suite_end_time - suite_start_time)),
('timestamp', _iso8601_timestamp(suite_start_time)),
]
_print_xml_element_header('testsuite', suite_attributes, stream)
# test_case_result entries are not guaranteed to be in any user-friendly
# order, especially when using subtests. So sort them.
for test_case_result in sorted(suite, key=lambda t: t.name):
test_case_result.print_xml_summary(stream)
stream.write('</testsuite>\n')
stream.write('</testsuites>\n')
def _setup_test_suite(self, suite_name):
"""Adds a test suite to the set of suites tracked by this test run.
Args:
suite_name: string, The name of the test suite being initialized.
"""
if suite_name in self.suites:
return
self.suites[suite_name] = []
self.failure_counts[suite_name] = 0
self.error_counts[suite_name] = 0
def set_end_time(self, timestamp_in_secs):
"""Sets the start timestamp of this test suite.
Args:
timestamp_in_secs: timestamp in seconds since epoch
"""
self.overall_end_time = timestamp_in_secs
def set_start_time(self, timestamp_in_secs):
"""Sets the end timestamp of this test suite.
Args:
timestamp_in_secs: timestamp in seconds since epoch
"""
self.overall_start_time = timestamp_in_secs
class _TextAndXMLTestResult(_pretty_print_reporter.TextTestResult):
"""Private TestResult class that produces both formatted text results and XML.
Used by TextAndXMLTestRunner.
"""
_TEST_SUITE_RESULT_CLASS = _TestSuiteResult
_TEST_CASE_RESULT_CLASS = _TestCaseResult
def __init__(self, xml_stream, stream, descriptions, verbosity,
time_getter=_time_copy, testsuites_properties=None):
super(_TextAndXMLTestResult, self).__init__(stream, descriptions, verbosity)
self.xml_stream = xml_stream
self.pending_test_case_results = {}
self.suite = self._TEST_SUITE_RESULT_CLASS()
if testsuites_properties:
self.suite._testsuites_properties = testsuites_properties
self.time_getter = time_getter
# This lock guards any mutations on pending_test_case_results.
self._pending_test_case_results_lock = threading.RLock()
def startTest(self, test):
self.start_time = self.time_getter()
super(_TextAndXMLTestResult, self).startTest(test)
def stopTest(self, test):
# Grabbing the write lock to avoid conflicting with stopTestRun.
with self._pending_test_case_results_lock:
super(_TextAndXMLTestResult, self).stopTest(test)
result = self.get_pending_test_case_result(test)
if not result:
test_name = test.id() or str(test)
sys.stderr.write('No pending test case: %s\n' % test_name)
return
if getattr(self, 'start_time', None) is None:
# startTest may not be called for skipped tests since Python 3.12.1.
self.start_time = self.time_getter()
test_id = id(test)
run_time = self.time_getter() - self.start_time
result.set_run_time(run_time)
result.set_start_time(self.start_time)
self.suite.add_test_case_result(result)
del self.pending_test_case_results[test_id]
def startTestRun(self):
self.suite.set_start_time(self.time_getter())
super(_TextAndXMLTestResult, self).startTestRun()
def stopTestRun(self):
self.suite.set_end_time(self.time_getter())
# All pending_test_case_results will be added to the suite and removed from
# the pending_test_case_results dictionary. Grab the write lock to prevent
# results from being added during this process, which could duplicate adds or
# accidentally erase newly appended pending results.
with self._pending_test_case_results_lock:
# Errors in the test fixture (setUpModule, tearDownModule,
# setUpClass, tearDownClass) can leave a pending result which
# never gets added to the suite. The runner calls stopTestRun
# which gives us an opportunity to add these errors for
# reporting here.
for test_id in self.pending_test_case_results:
result = self.pending_test_case_results[test_id]
if getattr(self, 'start_time', None) is not None:
run_time = self.suite.overall_end_time - self.start_time
result.set_run_time(run_time)
result.set_start_time(self.start_time)
self.suite.add_test_case_result(result)
self.pending_test_case_results.clear()
def _exc_info_to_string(self, err, test=None):
"""Converts a sys.exc_info()-style tuple of values into a string.
This method must be overridden because the method signature in
unittest.TestResult changed between Python 2.2 and 2.4.
Args:
err: A sys.exc_info() tuple of values for an error.
test: The test method.
Returns:
A formatted exception string.
"""
if test:
return super(_TextAndXMLTestResult, self)._exc_info_to_string(err, test)
return ''.join(traceback.format_exception(*err))
def add_pending_test_case_result(self, test, error_summary=None,
skip_reason=None):
"""Adds result information to a test case result which may still be running.
If a result entry for the test already exists, add_pending_test_case_result
will add error summary tuples and/or overwrite skip_reason for the result.
If it does not yet exist, a result entry will be created.
Note that a test result is considered to have been run and passed
only if there are no errors or skip_reason.
Args:
test: A test method as defined by unittest
error_summary: A 4-tuple with the following entries:
1) a string identifier of either "failure" or "error"
2) an exception_type
3) an exception_message
4) a string version of a sys.exc_info()-style tuple of values
('error', err[0], err[1], self._exc_info_to_string(err))
If the length of errors is 0, then the test is either passed or
skipped.
skip_reason: a string explaining why the test was skipped
"""
with self._pending_test_case_results_lock:
test_id = id(test)
if test_id not in self.pending_test_case_results:
self.pending_test_case_results[test_id] = self._TEST_CASE_RESULT_CLASS(
test)
if error_summary:
self.pending_test_case_results[test_id].errors.append(error_summary)
if skip_reason:
self.pending_test_case_results[test_id].skip_reason = skip_reason
def delete_pending_test_case_result(self, test):
with self._pending_test_case_results_lock:
test_id = id(test)
del self.pending_test_case_results[test_id]
def get_pending_test_case_result(self, test):
test_id = id(test)
return self.pending_test_case_results.get(test_id, None)
def addSuccess(self, test):
super(_TextAndXMLTestResult, self).addSuccess(test)
self.add_pending_test_case_result(test)
def addError(self, test, err):
super(_TextAndXMLTestResult, self).addError(test, err)
error_summary = ('error', err[0], err[1],
self._exc_info_to_string(err, test=test))
self.add_pending_test_case_result(test, error_summary=error_summary)
def addFailure(self, test, err):
super(_TextAndXMLTestResult, self).addFailure(test, err)
error_summary = ('failure', err[0], err[1],
self._exc_info_to_string(err, test=test))
self.add_pending_test_case_result(test, error_summary=error_summary)
def addSkip(self, test, reason):
super(_TextAndXMLTestResult, self).addSkip(test, reason)
self.add_pending_test_case_result(test, skip_reason=reason)
def addExpectedFailure(self, test, err):
super(_TextAndXMLTestResult, self).addExpectedFailure(test, err)
if callable(getattr(test, 'recordProperty', None)):
test.recordProperty('EXPECTED_FAILURE',
self._exc_info_to_string(err, test=test))
self.add_pending_test_case_result(test)
def addUnexpectedSuccess(self, test):
super(_TextAndXMLTestResult, self).addUnexpectedSuccess(test)
test_name = test.id() or str(test)
error_summary = ('error', '', '',
'Test case %s should have failed, but passed.'
% (test_name))
self.add_pending_test_case_result(test, error_summary=error_summary)
def addSubTest(self, test, subtest, err): # pylint: disable=invalid-name
super(_TextAndXMLTestResult, self).addSubTest(test, subtest, err)
if err is not None:
if issubclass(err[0], test.failureException):
error_summary = ('failure', err[0], err[1],
self._exc_info_to_string(err, test=test))
else:
error_summary = ('error', err[0], err[1],
self._exc_info_to_string(err, test=test))
else:
error_summary = None
self.add_pending_test_case_result(subtest, error_summary=error_summary)
def printErrors(self):
super(_TextAndXMLTestResult, self).printErrors()
self.xml_stream.write('<?xml version="1.0"?>\n')
self.suite.print_xml_summary(self.xml_stream)
class TextAndXMLTestRunner(unittest.TextTestRunner):
"""A test runner that produces both formatted text results and XML.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
_TEST_RESULT_CLASS = _TextAndXMLTestResult
_xml_stream = None
_testsuites_properties = {}
def __init__(self, xml_stream=None, *args, **kwargs):
"""Initialize a TextAndXMLTestRunner.
Args:
xml_stream: file-like or None; XML-formatted test results are output
via this object's write() method. If None (the default), the
new instance behaves as described in the set_default_xml_stream method
documentation below.
*args: passed unmodified to unittest.TextTestRunner.__init__.
**kwargs: passed unmodified to unittest.TextTestRunner.__init__.
"""
super(TextAndXMLTestRunner, self).__init__(*args, **kwargs)
if xml_stream is not None:
self._xml_stream = xml_stream
# else, do not set self._xml_stream to None -- this allows implicit fallback
# to the class attribute's value.
@classmethod
def set_default_xml_stream(cls, xml_stream):
"""Sets the default XML stream for the class.
Args:
xml_stream: file-like or None; used for instances when xml_stream is None
or not passed to their constructors. If None is passed, instances
created with xml_stream=None will act as ordinary TextTestRunner
instances; this is the default state before any calls to this method
have been made.
"""
cls._xml_stream = xml_stream
def _makeResult(self):
if self._xml_stream is None:
return super(TextAndXMLTestRunner, self)._makeResult()
else:
return self._TEST_RESULT_CLASS(
self._xml_stream, self.stream, self.descriptions, self.verbosity,
testsuites_properties=self._testsuites_properties)
@classmethod
def set_testsuites_property(cls, key, value):
cls._testsuites_properties[key] = value
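
For reference, a minimal sketch (not part of the vendored file) of wiring ``TextAndXMLTestRunner`` into a plain ``unittest`` entry point so JUnit XML results are written to a file while text output still goes to stderr; the output path is illustrative.

```python
# Hedged sketch: 'test_results.xml' is an arbitrary output path.
import sys
import unittest

from absl.testing import xml_reporter

if __name__ == '__main__':
  with open('test_results.xml', 'w') as xml_out:
    runner = xml_reporter.TextAndXMLTestRunner(
        xml_stream=xml_out, stream=sys.stderr, verbosity=2)
    # The XML summary is emitted when the run finishes (printErrors).
    unittest.main(testRunner=runner)
```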

View File

@ -0,0 +1,7 @@
# This is the list of Abseil authors for copyright purposes.
#
# This does not necessarily list everyone who has contributed code, since in
# some cases, their employer may be the copyright holder. To see the full list
# of contributors, see the revision history in source control.
Google Inc.

View File

@ -0,0 +1 @@
pip

View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,84 @@
Metadata-Version: 2.1
Name: absl-py
Version: 2.1.0
Summary: Abseil Python Common Libraries, see https://github.com/abseil/abseil-py.
Home-page: https://github.com/abseil/abseil-py
Author: The Abseil Authors
License: Apache 2.0
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Intended Audience :: Developers
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Requires-Python: >=3.7
Description-Content-Type: text/markdown
License-File: LICENSE
License-File: AUTHORS
# Abseil Python Common Libraries
This repository is a collection of Python library code for building Python
applications. The code is collected from Google's own Python code base, and has
been extensively tested and used in production.
## Features
* Simple application startup
* Distributed commandline flags system
* Custom logging module with additional features
* Testing utilities
## Getting Started
### Installation
To install the package, simply run:
```bash
pip install absl-py
```
Or install from source:
```bash
python setup.py install
```
### Running Tests
To run Abseil tests, you can clone the git repo and run
[bazel](https://bazel.build/):
```bash
git clone https://github.com/abseil/abseil-py.git
cd abseil-py
bazel test absl/...
```
### Example Code
Please refer to
[smoke_tests/sample_app.py](https://github.com/abseil/abseil-py/blob/main/smoke_tests/sample_app.py)
as an example to get started.
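For a quicker start, a minimal sketch of an absl application (illustrative only; the linked sample app is the canonical example):
```python
# Hedged sketch: flag name and greeting are illustrative.
from absl import app
from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_string('name', 'world', 'Who to greet.')


def main(argv):
  del argv  # Unused.
  print(f'Hello, {FLAGS.name}!')


if __name__ == '__main__':
  app.run(main)  # Parses flags, then calls main(remaining_argv).
```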
## Documentation
See the [Abseil Python Developer Guide](https://abseil.io/docs/python/).
## Future Releases
The current repository includes an initial set of libraries for early adoption.
More components and interoperability with Abseil C++ Common Libraries
will come in future releases.
## License
The Abseil Python library is licensed under the terms of the Apache
license. See [LICENSE](LICENSE) for more information.

View File

@ -0,0 +1,53 @@
absl/__init__.py,sha256=7cM57swk2T1Hc5wxmt-JpcaR6xfdPJyL_lyRqgODvuM,584
absl/__pycache__/__init__.cpython-311.pyc,,
absl/__pycache__/app.cpython-311.pyc,,
absl/__pycache__/command_name.cpython-311.pyc,,
absl/app.py,sha256=DQROJ_Ovex6w2_nr_s7AHgXQle951XmcVtlNrMjfSFA,15374
absl/app.pyi,sha256=DqRvFRos3oFk00lZJSKaHZuL_3-LnZl-ylg_VAXtPcc,1737
absl/command_name.py,sha256=C7CuwMMedDLUOX88Et92QZb2se__nU7txgpO-01amxg,2301
absl/flags/__init__.py,sha256=FgR_NxQG1xLA2ZxLU51HTrLWV5kbN9eSCI-47Z7D3WA,7728
absl/flags/__pycache__/__init__.cpython-311.pyc,,
absl/flags/__pycache__/_argument_parser.cpython-311.pyc,,
absl/flags/__pycache__/_defines.cpython-311.pyc,,
absl/flags/__pycache__/_exceptions.cpython-311.pyc,,
absl/flags/__pycache__/_flag.cpython-311.pyc,,
absl/flags/__pycache__/_flagvalues.cpython-311.pyc,,
absl/flags/__pycache__/_helpers.cpython-311.pyc,,
absl/flags/__pycache__/_validators.cpython-311.pyc,,
absl/flags/__pycache__/_validators_classes.cpython-311.pyc,,
absl/flags/__pycache__/argparse_flags.cpython-311.pyc,,
absl/flags/_argument_parser.py,sha256=TQFhT0OcQuRO_1GTJoUvYC1KU6wV9f4Lc7jQmajBGi0,20934
absl/flags/_defines.py,sha256=s_YA_tAHFU4wxrJqKLH5uMldTl1DtlUfSvgBbflXkQ8,52783
absl/flags/_exceptions.py,sha256=Lws7ZZrlLJG83VHuOB4Z4CNfcSoKX5pJnsNRCtp-dMw,3657
absl/flags/_flag.py,sha256=Sv_d7kDSZh-VNr4JGrBy4g7VxnbRspOOd5hO6wA94qk,19895
absl/flags/_flagvalues.py,sha256=Gferpr9yg8Ntc6ij9tPiChliYz5jYWfVJoKzAREwNFw,54127
absl/flags/_helpers.py,sha256=uWWeqbhc19kTXonfM7mNZT68ZakmJgu-v5IHeS9A9Xc,14081
absl/flags/_validators.py,sha256=_hpVwThXQhL6PFOA9-L2ZRI-7zLu2UxU_hRJJWXYoHw,14144
absl/flags/_validators_classes.py,sha256=KLBJhJAt8C18gy2Uq-q7bUFNS_AhPBlxlwGiNm5gWXU,6157
absl/flags/argparse_flags.py,sha256=57E1HFa40tvnQ3DQzY3x1qdBUIxtfTTYAYONT_k8HOI,14485
absl/logging/__init__.py,sha256=mzF3rusWjzLbuVdZI8SfPiIoqfWO9kBUhxVOvGZQTv4,42082
absl/logging/__init__.pyi,sha256=NPAna_9rrYTVNIHLXUbdvsAZcNlv4IJs9yNnL59mxr8,5794
absl/logging/__pycache__/__init__.cpython-311.pyc,,
absl/logging/__pycache__/converter.cpython-311.pyc,,
absl/logging/converter.py,sha256=eTucx1Ojix7YWMQUyWKzPRTrxGLuCkNsTmJa1GW6k94,6353
absl/testing/__init__.py,sha256=7cM57swk2T1Hc5wxmt-JpcaR6xfdPJyL_lyRqgODvuM,584
absl/testing/__pycache__/__init__.cpython-311.pyc,,
absl/testing/__pycache__/_bazelize_command.cpython-311.pyc,,
absl/testing/__pycache__/_pretty_print_reporter.cpython-311.pyc,,
absl/testing/__pycache__/absltest.cpython-311.pyc,,
absl/testing/__pycache__/flagsaver.cpython-311.pyc,,
absl/testing/__pycache__/parameterized.cpython-311.pyc,,
absl/testing/__pycache__/xml_reporter.cpython-311.pyc,,
absl/testing/_bazelize_command.py,sha256=R4rV4j5AOSp3PNkVQKP1I-SKYzQbXyeuiOT3d23cTLA,2302
absl/testing/_pretty_print_reporter.py,sha256=nL5qSsYWF6O_C6L9PexwFSPxs68Wc85RhdhRBN2AgTw,3140
absl/testing/absltest.py,sha256=sgb0TPgNP0_nLKcxrHBlifvUsgufnYURVR8Vau3f278,101119
absl/testing/flagsaver.py,sha256=514JmVdCn-P0jsTntskCtUfxrHyp3urLdn2bzDd991s,13392
absl/testing/parameterized.py,sha256=PT1P3X__WkFC_NyGWifUdJeqn-BM4JI3yy-1zsGaFEI,27807
absl/testing/xml_reporter.py,sha256=k_9cWhw01RGCQImGDciTa_RrBEEuPZ3IPD5IASoRwwM,21720
absl_py-2.1.0.dist-info/AUTHORS,sha256=YoLudsylaQg7W5mLn4FroQMuEnuNx8RpQrhkd_xvv6U,296
absl_py-2.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
absl_py-2.1.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
absl_py-2.1.0.dist-info/METADATA,sha256=CTp5OILgEjYv4Y7dpCHzW5QmM57hl-2i-AizwFlnRYA,2311
absl_py-2.1.0.dist-info/RECORD,,
absl_py-2.1.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
absl_py-2.1.0.dist-info/top_level.txt,sha256=0M_1z27Hi5Bsj1EhTfE_ajdJdFxeP_aw0xXnR4BXXhI,5

View File

@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.42.0)
Root-Is-Purelib: true
Tag: py3-none-any

View File

@ -0,0 +1 @@
absl

View File

@ -0,0 +1,20 @@
=======
Credits
=======
Maintainer
----------
* Simon Percivall <percivall@gmail.com>
Authors
-------
* The Python Software Foundation
* Bogdan Opanchuk
* Vladimir Iakovlev
* Thomas Grainger
* Amund Hov
* Jakub Wilk
* Mateusz Bysiek
* Serge Sans Paille

View File

@ -0,0 +1 @@
pip

View File

@ -0,0 +1,64 @@
LICENSE
=======
Copyright (c) 2014, Simon Percivall
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of AST Unparser nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------
1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.
2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python alone or in any derivative version,
provided, however, that PSF's License Agreement and PSF's notice of copyright,
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are retained
in Python alone or in any derivative version prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.
4. PSF is making Python available to Licensee on an "AS IS"
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.

View File

@ -0,0 +1,165 @@
Metadata-Version: 2.1
Name: astunparse
Version: 1.6.3
Summary: An AST unparser for Python
Home-page: https://github.com/simonpercivall/astunparse
Maintainer: Simon Percivall
Maintainer-email: percivall@gmail.com
License: BSD
Keywords: astunparse
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Natural Language :: English
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Topic :: Software Development :: Code Generators
Requires-Dist: wheel (<1.0,>=0.23.0)
Requires-Dist: six (<2.0,>=1.6.1)
============
AST Unparser
============
.. image:: https://badge.fury.io/py/astunparse.png
:target: http://badge.fury.io/py/astunparse
.. image:: https://travis-ci.org/simonpercivall/astunparse.png?branch=master
:target: https://travis-ci.org/simonpercivall/astunparse
.. image:: https://readthedocs.org/projects/astunparse/badge/
:target: https://astunparse.readthedocs.org/
An AST unparser for Python.
This is a factored-out version of the ``unparse`` module found in the Python
source distribution: under Demo/parser in Python 2 and under Tools/parser
in Python 3.
Basic example::
import inspect
import ast
import astunparse
# get back the source code
astunparse.unparse(ast.parse(inspect.getsource(ast)))
# get a pretty-printed dump of the AST
astunparse.dump(ast.parse(inspect.getsource(ast)))
This library is single-source compatible with Python 2.6 through Python 3.5. It
is authored by the Python core developers; I have simply merged the Python 2.7
and the Python 3.5 source and test suites, and added a wrapper. This factoring
out is to provide a library implementation that supports both versions.
Added to this is a pretty-printing ``dump`` utility function.
The test suite both runs specific tests and also roundtrips much of the
standard library.
Extensions and Alternatives
---------------------------
Similar projects include:
* codegen_
* astor_
* astmonkey_
* astprint_
None of these round-trips much of the standard library, and all of them fail
several of the basic tests in the ``test_unparse`` test suite.
This library instead reuses mature, core-maintained code rather than trying to
patch existing libraries. The ``unparse`` and ``test_unparse`` modules
are under the PSF license.
Extensions include:
* typed-astunparse: extends astunparse to support type annotations.
* Documentation: http://astunparse.rtfd.org.
Features
--------
* unparses Python AST.
* pretty-prints AST.
.. _codegen: https://github.com/andreif/codegen
.. _astor: https://github.com/berkerpeksag/astor
.. _astmonkey: https://github.com/konradhalas/astmonkey
.. _astprint: https://github.com/Manticore/astprint
Changelog
=========
Here are the recent changes to AST Unparser.
1.6.3 - 2019-12-22
~~~~~~~~~~~~~~~~~~
* Add full support for Python 3.8
1.6.2 - 2019-01-19
~~~~~~~~~~~~~~~~~~
* Add support for the Constant node in Python 3.8
* Add tests to the sdist
1.6.1 - 2018-10-03
~~~~~~~~~~~~~~~~~~
* Fix the roundtripping of very complex f-strings.
1.6.0 - 2018-09-30
~~~~~~~~~~~~~~~~~~
* Python 3.7 compatibility
1.5.0 - 2017-02-05
~~~~~~~~~~~~~~~~~~
* Python 3.6 compatibility
* bugfix: correct argparser option type
1.4.0 - 2016-06-24
~~~~~~~~~~~~~~~~~~
* Support for the ``async`` keyword
* Support for unparsing "Interactive" and "Expression" nodes
1.3.0 - 2016-01-17
~~~~~~~~~~~~~~~~~~
* Python 3.5 compatibility
1.2.0 - 2014-04-03
~~~~~~~~~~~~~~~~~~
* Python 2.6 through 3.4 compatibility
* A new function ``dump`` is added to return a pretty-printed version
of the AST. It's also available when running ``python -m astunparse``
as the ``--dump`` argument.
1.1.0 - 2014-04-01
~~~~~~~~~~~~~~~~~~
* ``unparse`` will return the source code for an AST. It is pretty
  feature-complete, round-trips the stdlib, and is compatible with
  Python 2.7 and Python 3.4.
  Running ``python -m astunparse`` will print the round-tripped source
  for any Python files given as arguments.
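As a quick, hypothetical illustration of the command line described above
(``my_module.py`` stands in for any Python file you have on disk)::

    python -m astunparse my_module.py

This prints ``my_module.py`` after a parse/unparse round trip; the ``--dump``
option described above selects the pretty-printed AST form instead.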

View File

@ -0,0 +1,15 @@
astunparse-1.6.3.dist-info/AUTHORS.rst,sha256=_VxW6Jgbo7Qu1u8z3O3uS0_AoCwuagAPoHwsc5TwxVw,257
astunparse-1.6.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
astunparse-1.6.3.dist-info/LICENSE,sha256=kvwOT0-pRgVY7t80ErmI1DOi3Ls6nEVAKhRaT6uKasY,3936
astunparse-1.6.3.dist-info/METADATA,sha256=vQ5__mA9vvjnREIJ61SolBrPCW52OugdLCScjVsL_Ec,4391
astunparse-1.6.3.dist-info/RECORD,,
astunparse-1.6.3.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
astunparse-1.6.3.dist-info/top_level.txt,sha256=D97cApMaOtwZJ6KEsu7GakV5Kt7YpcRkL0dD010ftIo,11
astunparse/__init__.py,sha256=Vi1Ni9JeZyX7kUX7YEgvaj1S6ky6RyUy8qL_CON1Ugw,357
astunparse/__main__.py,sha256=OeZij_FXCdGilJ1G5H7H0ChEOuDJUqY_hFCLe8GZpc8,1222
astunparse/__pycache__/__init__.cpython-311.pyc,,
astunparse/__pycache__/__main__.cpython-311.pyc,,
astunparse/__pycache__/printer.cpython-311.pyc,,
astunparse/__pycache__/unparser.cpython-311.pyc,,
astunparse/printer.py,sha256=8K09sbxtItuH9qE7jphZfN4vudsn3KpIWssudeBAXGo,1408
astunparse/unparser.py,sha256=RnI-3goUb3N72KIDHDRiFApOJZfIcAtQ5vEIUY7jB5M,27426

View File

@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.33.6)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

View File

@ -0,0 +1 @@
astunparse

View File

@ -0,0 +1,20 @@
# coding: utf-8
from __future__ import absolute_import
from six.moves import cStringIO
from .unparser import Unparser
from .printer import Printer
__version__ = '1.6.3'
def unparse(tree):
    """Return Python source code regenerated from the AST ``tree``."""
    v = cStringIO()
    Unparser(tree, file=v)
    return v.getvalue()


def dump(tree):
    """Return a pretty-printed, indented text dump of the AST ``tree``."""
    v = cStringIO()
    Printer(file=v).visit(tree)
    return v.getvalue()

View File

@ -0,0 +1,48 @@
from __future__ import print_function
import sys
import os
import argparse
import ast
from .unparser import roundtrip
from . import dump
def roundtrip_recursive(target, dump_tree=False):
    if os.path.isfile(target):
        print(target)
        print("=" * len(target))
        if dump_tree:
            # dump() expects an AST node, so parse the file first and print the result.
            with open(target) as source_file:
                print(dump(ast.parse(source_file.read(), filename=target)))
        else:
            roundtrip(target)
        print()
elif os.path.isdir(target):
for item in os.listdir(target):
if item.endswith(".py"):
roundtrip_recursive(os.path.join(target, item), dump_tree)
else:
print(
"WARNING: skipping '%s', not a file or directory" % target,
file=sys.stderr
)
def main(args):
parser = argparse.ArgumentParser(prog="astunparse")
parser.add_argument(
'target',
nargs='+',
help="Files or directories to show roundtripped source for"
)
    parser.add_argument(
        '--dump',
        # type=bool would treat any non-empty string as True; a store_true flag is what is meant.
        action='store_true',
        help="Show a pretty-printed AST instead of the source"
    )
arguments = parser.parse_args(args)
for target in arguments.target:
roundtrip_recursive(target, dump_tree=arguments.dump)
if __name__ == "__main__":
main(sys.argv[1:])

View File

@ -0,0 +1,51 @@
from __future__ import unicode_literals
import sys
import ast
import six
class Printer(ast.NodeVisitor):
def __init__(self, file=sys.stdout, indent=" "):
self.indentation = 0
self.indent_with = indent
self.f = file
# overridden to make the API obvious
def visit(self, node):
super(Printer, self).visit(node)
def write(self, text):
self.f.write(six.text_type(text))
    def generic_visit(self, node):
        # Print a list of nodes as "[...]" and an AST node as "Type(field=...)",
        # indenting the output whenever there is more than one child to show.
        if isinstance(node, list):
nodestart = "["
nodeend = "]"
children = [("", child) for child in node]
else:
nodestart = type(node).__name__ + "("
nodeend = ")"
children = [(name + "=", value) for name, value in ast.iter_fields(node)]
if len(children) > 1:
self.indentation += 1
self.write(nodestart)
for i, pair in enumerate(children):
attr, child = pair
if len(children) > 1:
self.write("\n" + self.indent_with * self.indentation)
if isinstance(child, (ast.AST, list)):
self.write(attr)
self.visit(child)
else:
self.write(attr + repr(child))
if i != len(children) - 1:
self.write(",")
self.write(nodeend)
if len(children) > 1:
self.indentation -= 1

View File

@ -0,0 +1,906 @@
"Usage: unparse.py <path to source file>"
from __future__ import print_function, unicode_literals
import six
import sys
import ast
import os
import tokenize
from six import StringIO
# Large float and imaginary literals get turned into infinities in the AST.
# We unparse those infinities to INFSTR.
INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1)
def interleave(inter, f, seq):
"""Call f on each item in seq, calling inter() in between.
"""
seq = iter(seq)
try:
f(next(seq))
except StopIteration:
pass
else:
for x in seq:
inter()
f(x)