diff --git a/README.MD b/README.MD new file mode 100644 index 0000000..4a59965 --- /dev/null +++ b/README.MD @@ -0,0 +1,2 @@ +# Cat or Not - first prototype +### Monday 15:30 group diff --git a/venv/bin/gunicorn b/venv/bin/gunicorn new file mode 100755 index 0000000..4f91070 --- /dev/null +++ b/venv/bin/gunicorn @@ -0,0 +1,11 @@ +#!/home/c00ler/cat_or_not/venv/bin/python3 + +# -*- coding: utf-8 -*- +import re +import sys + +from gunicorn.app.wsgiapp import run + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(run()) diff --git a/venv/bin/gunicorn_paster b/venv/bin/gunicorn_paster new file mode 100755 index 0000000..df4251e --- /dev/null +++ b/venv/bin/gunicorn_paster @@ -0,0 +1,11 @@ +#!/home/c00ler/cat_or_not/venv/bin/python3 + +# -*- coding: utf-8 -*- +import re +import sys + +from gunicorn.app.pasterapp import run + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(run()) diff --git a/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/INSTALLER b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/LICENSE.txt b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/LICENSE.txt new file mode 100644 index 0000000..65865a9 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/LICENSE.txt @@ -0,0 +1,23 @@ +2009-2018 (c) BenoƮt Chesneau +2009-2015 (c) Paul J. Davis + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
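The two `venv/bin` wrapper scripts above are the standard setuptools console-script shims: they strip a Windows-style `-script.pyw`/`.exe` suffix from `sys.argv[0]` and hand off to gunicorn's `run()` entry point, which expects an `APP_MODULE` argument of the form `module:callable`. A minimal sketch of the kind of WSGI callable those scripts end up serving (the `app.py` module and its contents are hypothetical, not part of this commit):

    # app.py -- minimal WSGI callable; launched as: gunicorn --workers=2 app:application
    def application(environ, start_response):
        body = b"Hello from Cat or Not\n"
        start_response("200 OK", [("Content-Type", "text/plain"),
                                  ("Content-Length", str(len(body)))])
        return [body]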
diff --git a/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/METADATA b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/METADATA new file mode 100644 index 0000000..148e500 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/METADATA @@ -0,0 +1,111 @@ +Metadata-Version: 2.1 +Name: gunicorn +Version: 19.9.0 +Summary: WSGI HTTP Server for UNIX +Home-page: http://gunicorn.org +Author: Benoit Chesneau +Author-email: benoitc@e-engura.com +License: MIT +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: Environment :: Other Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Internet +Classifier: Topic :: Utilities +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Topic :: Internet :: WWW/HTTP :: WSGI +Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Server +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Requires-Python: >=2.6, !=3.0.*, !=3.1.* +Provides-Extra: tornado +Provides-Extra: gthread +Provides-Extra: eventlet +Provides-Extra: gevent +Provides-Extra: eventlet +Requires-Dist: eventlet (>=0.9.7); extra == 'eventlet' +Provides-Extra: gevent +Requires-Dist: gevent (>=0.13); extra == 'gevent' +Provides-Extra: gthread +Provides-Extra: tornado +Requires-Dist: tornado (>=0.2); extra == 'tornado' + +Gunicorn +-------- + +.. image:: https://img.shields.io/pypi/v/gunicorn.svg?style=flat + :alt: PyPI version + :target: https://pypi.python.org/pypi/gunicorn + +.. image:: https://img.shields.io/pypi/pyversions/gunicorn.svg + :alt: Supported Python versions + :target: https://pypi.python.org/pypi/gunicorn + +.. image:: https://travis-ci.org/benoitc/gunicorn.svg?branch=master + :alt: Build Status + :target: https://travis-ci.org/benoitc/gunicorn + +Gunicorn 'Green Unicorn' is a Python WSGI HTTP Server for UNIX. It's a pre-fork +worker model ported from Ruby's Unicorn_ project. The Gunicorn server is broadly +compatible with various web frameworks, simply implemented, light on server +resource usage, and fairly speedy. + +Feel free to join us in `#gunicorn`_ on Freenode_. + +Documentation +------------- + +The documentation is hosted at http://docs.gunicorn.org. + +Installation +------------ + +Gunicorn requires **Python 2.x >= 2.6** or **Python 3.x >= 3.2**. + +Install from PyPI:: + + $ pip install gunicorn + + +Usage +----- + +Basic usage:: + + $ gunicorn [OPTIONS] APP_MODULE + +Where ``APP_MODULE`` is of the pattern ``$(MODULE_NAME):$(VARIABLE_NAME)``. The +module name can be a full dotted path. The variable name refers to a WSGI +callable that should be found in the specified module. + +Example with test app:: + + $ cd examples + $ gunicorn --workers=2 test:app + + +License +------- + +Gunicorn is released under the MIT License. See the LICENSE_ file for more +details. + +.. 
_Unicorn: https://bogomips.org/unicorn/ +.. _`#gunicorn`: https://webchat.freenode.net/?channels=gunicorn +.. _Freenode: https://freenode.net/ +.. _LICENSE: https://github.com/benoitc/gunicorn/blob/master/LICENSE + + diff --git a/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/RECORD b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/RECORD new file mode 100644 index 0000000..6166ec9 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/RECORD @@ -0,0 +1,89 @@ +gunicorn/__init__.py,sha256=kRm2HQJytwQi-xmyUfM0cOMyls23DfFDgGFbGI2Gj68,255 +gunicorn/_compat.py,sha256=5cXb6vMfVzInDq-AHNyZfsK-UG5NetDn62nPfqylHSU,9355 +gunicorn/arbiter.py,sha256=AbJNSFnTmx9Qd-vZAqEH3y5fz8ydPmyli_BERNIwdyE,20158 +gunicorn/argparse_compat.py,sha256=gsHDGwo4BSJWHdiaEXy0Emr96NKC0LDYmK5nB7PE8Qc,87791 +gunicorn/config.py,sha256=wYeAJFMweU3FXNF4BdfgZzPC94vUXUnuYgI6lNk-5_U,53420 +gunicorn/debug.py,sha256=UUw-eteLEm_OQ98D6K3XtDjx4Dya2H35zdiu8z7F7uc,2289 +gunicorn/errors.py,sha256=JlDBjag90gMiRwLHG3xzEJzDOntSl1iM32R277-U6j0,919 +gunicorn/glogging.py,sha256=bvnX-sky6HgqJor2JZ9VKZZzT4uh_yOgknkYegB7D7Y,15581 +gunicorn/pidfile.py,sha256=_69tsfF1aHklrMrJe2sHERovMduRByVTv99my7yQ874,2357 +gunicorn/reloader.py,sha256=CPNfYAAvJHazX3NAM7qysSRt0fpiHBGPqBlB0tYKhxs,3839 +gunicorn/selectors.py,sha256=14_UESrpE3AQKXWKeeAUG9vBTzJ0yTYDGtEo6xOtlDY,18997 +gunicorn/six.py,sha256=6N-6RCENPfBtMpN5UmgDfDKmJebbbuPu_Dk3Zf8ngww,27344 +gunicorn/sock.py,sha256=gX2FsdsOGMCtSHbDXn7lsiYYYRc3roQklIJLip1oZQo,6019 +gunicorn/systemd.py,sha256=ffhv17cdv-hDeFAJi1eAVtJskkVciV6cQU75Q2oplqg,1362 +gunicorn/util.py,sha256=Ns_a8Pf7MkaEi0KbV3GsP9aVQ2a_S45EjSE6Iyg2tYU,16229 +gunicorn/app/__init__.py,sha256=GuqstqdkizeV4HRbd8aGMBn0Q8IDOyRU1wMMNqNe5GY,127 +gunicorn/app/base.py,sha256=LKxyziLMPNlK3qm6dPMieELBqfLfmwBFnn9SB-KBogE,6652 +gunicorn/app/pasterapp.py,sha256=AGzZnUpcpw8O8KrizxTgdJBZ4lQdrHgsV0gdx7FVTs8,6046 +gunicorn/app/wsgiapp.py,sha256=ny71qjegQHl_bGMjNfq_aemPrmGEpH2bMRIdph6bj4Q,1870 +gunicorn/http/__init__.py,sha256=b4TF3x5F0VYOPTOeNYwRGR1EYHBaPMhZRMoNeuD5-n0,277 +gunicorn/http/_sendfile.py,sha256=Eqd-s3HlvLuyfGjqaH_Jk72cAtEV8hQv5tb1M1AqcBU,2217 +gunicorn/http/body.py,sha256=MmlZpj_6oRPj3oPVSMQZr0X3KH6ikntxDnVcLgfekZs,7345 +gunicorn/http/errors.py,sha256=sNjF2lm4m2qyZ9l95_U33FRxPXpxXzjnZyYqWS-hxd4,2850 +gunicorn/http/message.py,sha256=G5po0upwbrTyIggb_IEAItIjSi_aDoWYLPQ62o8pOI4,12257 +gunicorn/http/parser.py,sha256=IRMvp0veP4wL8Z4vgNV72CPydCNPdNNIy9u-DlDvvSo,1294 +gunicorn/http/unreader.py,sha256=s4kDW5euiJPsDuHzCqFXUtHCApqIxpShb9dtAyjJw9Y,2019 +gunicorn/http/wsgi.py,sha256=SETzcFoLggub2aMuGduTVELBwJGg9YvvDbkiFbugkwU,12856 +gunicorn/instrument/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +gunicorn/instrument/statsd.py,sha256=5xueDuTZMFtmS8ayGT4sU_OyB9qkEv4Agk-eJwAmhJM,4434 +gunicorn/workers/__init__.py,sha256=arPaAM8HxcK39L2dmDzmMhpK9bsyLOJymuCcBz_qqw0,774 +gunicorn/workers/_gaiohttp.py,sha256=llho90CjwpeAB9ehrYeGmD9VZZAPdcNpVwnrBA3GEZA,5079 +gunicorn/workers/base.py,sha256=nzo4KfCQkO3Y2HKuKVk-xInZUiYay_A5B9e_9NVXU28,9121 +gunicorn/workers/base_async.py,sha256=54VkS3S_wrFD7v3jInhFfkeBhaPnV5UN-cu-i5MoXkc,5575 +gunicorn/workers/gaiohttp.py,sha256=3rhXky6APkhI0D9nwXlogLo_Jd9v98CiEuCy9inzCU4,823 +gunicorn/workers/geventlet.py,sha256=mE-Zw3zh8lOZVaprXcfaoBMmwKeDj6sZzdjmgIsvHXw,4258 +gunicorn/workers/ggevent.py,sha256=OV5KCJ3qlJP5E46sjyWQKGbQ5xGR2SOrZlEtLhIB89s,7412 +gunicorn/workers/gthread.py,sha256=HIoWuylHZfH1wlSh4eZ8wxo1kQ5abvdUaFfKfIsgQvI,12009 
+gunicorn/workers/gtornado.py,sha256=LtBWnEX7MNpeGX-YmlBoV1_OOhjkdytFmt1pzOlRPZk,5044 +gunicorn/workers/sync.py,sha256=_vd1JATNLG4MgJppNJG5KWBIzLGYqRzhEAQVz9H11LI,7153 +gunicorn/workers/workertmp.py,sha256=6QINPBrriLvezgkC_hclOOeXLi_owMt_SOA5KPEIN-A,1459 +gunicorn-19.9.0.dist-info/LICENSE.txt,sha256=eJ_hG5Lhyr-890S1_MOSyb1cZ5hgOk6J-SW2M3mE0d8,1136 +gunicorn-19.9.0.dist-info/METADATA,sha256=SBjzTcJcbKUR9ev_rvypyWJYU0qgHvm8KzgfG6FtniE,3388 +gunicorn-19.9.0.dist-info/RECORD,, +gunicorn-19.9.0.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110 +gunicorn-19.9.0.dist-info/entry_points.txt,sha256=XeFINKRdSUKwJwaVSolO24PuV_YeO71IMF-rOra5JO8,184 +gunicorn-19.9.0.dist-info/top_level.txt,sha256=cdMaa2yhxb8do-WioY9qRHUCfwf55YztjwQCncaInoE,9 +../../../bin/gunicorn,sha256=7yu1I7Rtn3C9Bl_xSluXZLIB0F6-0oADZ_RfNlEwpLg,245 +../../../bin/gunicorn_paster,sha256=GZ9v1m5WHSh7Yup93ysPfhsFUDkSsv-YN-tXToR1k9k,247 +gunicorn-19.9.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +gunicorn/__pycache__/selectors.cpython-36.pyc,, +gunicorn/__pycache__/_compat.cpython-36.pyc,, +gunicorn/__pycache__/__init__.cpython-36.pyc,, +gunicorn/__pycache__/systemd.cpython-36.pyc,, +gunicorn/__pycache__/pidfile.cpython-36.pyc,, +gunicorn/__pycache__/six.cpython-36.pyc,, +gunicorn/__pycache__/util.cpython-36.pyc,, +gunicorn/__pycache__/glogging.cpython-36.pyc,, +gunicorn/__pycache__/errors.cpython-36.pyc,, +gunicorn/__pycache__/debug.cpython-36.pyc,, +gunicorn/__pycache__/config.cpython-36.pyc,, +gunicorn/__pycache__/sock.cpython-36.pyc,, +gunicorn/__pycache__/reloader.cpython-36.pyc,, +gunicorn/__pycache__/argparse_compat.cpython-36.pyc,, +gunicorn/__pycache__/arbiter.cpython-36.pyc,, +gunicorn/app/__pycache__/base.cpython-36.pyc,, +gunicorn/app/__pycache__/pasterapp.cpython-36.pyc,, +gunicorn/app/__pycache__/__init__.cpython-36.pyc,, +gunicorn/app/__pycache__/wsgiapp.cpython-36.pyc,, +gunicorn/http/__pycache__/parser.cpython-36.pyc,, +gunicorn/http/__pycache__/__init__.cpython-36.pyc,, +gunicorn/http/__pycache__/message.cpython-36.pyc,, +gunicorn/http/__pycache__/body.cpython-36.pyc,, +gunicorn/http/__pycache__/_sendfile.cpython-36.pyc,, +gunicorn/http/__pycache__/wsgi.cpython-36.pyc,, +gunicorn/http/__pycache__/errors.cpython-36.pyc,, +gunicorn/http/__pycache__/unreader.cpython-36.pyc,, +gunicorn/instrument/__pycache__/__init__.cpython-36.pyc,, +gunicorn/instrument/__pycache__/statsd.cpython-36.pyc,, +gunicorn/workers/__pycache__/base_async.cpython-36.pyc,, +gunicorn/workers/__pycache__/base.cpython-36.pyc,, +gunicorn/workers/__pycache__/gaiohttp.cpython-36.pyc,, +gunicorn/workers/__pycache__/workertmp.cpython-36.pyc,, +gunicorn/workers/__pycache__/geventlet.cpython-36.pyc,, +gunicorn/workers/__pycache__/gtornado.cpython-36.pyc,, +gunicorn/workers/__pycache__/sync.cpython-36.pyc,, +gunicorn/workers/__pycache__/__init__.cpython-36.pyc,, +gunicorn/workers/__pycache__/_gaiohttp.cpython-36.pyc,, +gunicorn/workers/__pycache__/gthread.cpython-36.pyc,, +gunicorn/workers/__pycache__/ggevent.cpython-36.pyc,, diff --git a/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/WHEEL b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/WHEEL new file mode 100644 index 0000000..1316c41 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.31.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/entry_points.txt 
b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/entry_points.txt new file mode 100644 index 0000000..d5b5aa1 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/entry_points.txt @@ -0,0 +1,8 @@ + + [console_scripts] + gunicorn=gunicorn.app.wsgiapp:run + gunicorn_paster=gunicorn.app.pasterapp:run + + [paste.server_runner] + main=gunicorn.app.pasterapp:paste_server + \ No newline at end of file diff --git a/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/top_level.txt b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/top_level.txt new file mode 100644 index 0000000..8f22dcc --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/top_level.txt @@ -0,0 +1 @@ +gunicorn diff --git a/venv/lib/python3.6/site-packages/gunicorn/__init__.py b/venv/lib/python3.6/site-packages/gunicorn/__init__.py new file mode 100644 index 0000000..7820479 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +version_info = (19, 9, 0) +__version__ = ".".join([str(v) for v in version_info]) +SERVER_SOFTWARE = "gunicorn/%s" % __version__ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/__init__.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..642097d Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/_compat.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/_compat.cpython-36.pyc new file mode 100644 index 0000000..32f8ff6 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/_compat.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/arbiter.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/arbiter.cpython-36.pyc new file mode 100644 index 0000000..ecd7abe Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/arbiter.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/argparse_compat.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/argparse_compat.cpython-36.pyc new file mode 100644 index 0000000..7b035d1 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/argparse_compat.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/config.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/config.cpython-36.pyc new file mode 100644 index 0000000..179f75a Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/config.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/debug.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/debug.cpython-36.pyc new file mode 100644 index 0000000..f2946f5 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/debug.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/errors.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/errors.cpython-36.pyc new file mode 100644 index 0000000..bf2e035 Binary files /dev/null and 
b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/errors.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/glogging.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/glogging.cpython-36.pyc new file mode 100644 index 0000000..67a46ed Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/glogging.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/pidfile.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/pidfile.cpython-36.pyc new file mode 100644 index 0000000..0da94b1 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/pidfile.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/reloader.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/reloader.cpython-36.pyc new file mode 100644 index 0000000..286ec3c Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/reloader.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/selectors.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/selectors.cpython-36.pyc new file mode 100644 index 0000000..93c787e Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/selectors.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/six.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/six.cpython-36.pyc new file mode 100644 index 0000000..7666d61 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/six.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/sock.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/sock.cpython-36.pyc new file mode 100644 index 0000000..2c14004 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/sock.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/systemd.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/systemd.cpython-36.pyc new file mode 100644 index 0000000..21fd000 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/systemd.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/__pycache__/util.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/util.cpython-36.pyc new file mode 100644 index 0000000..f3510cb Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/__pycache__/util.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/_compat.py b/venv/lib/python3.6/site-packages/gunicorn/_compat.py new file mode 100644 index 0000000..39dbfdf --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/_compat.py @@ -0,0 +1,298 @@ +import sys + +from gunicorn import six + +PY26 = (sys.version_info[:2] == (2, 6)) +PY33 = (sys.version_info >= (3, 3)) + + +def _check_if_pyc(fname): + """Return True if the extension is .pyc, False if .py + and None if otherwise""" + from imp import find_module + from os.path import realpath, dirname, basename, splitext + + # Normalize the file-path for the find_module() + filepath = realpath(fname) + dirpath = dirname(filepath) + module_name = splitext(basename(filepath))[0] + + # Validate and fetch + try: + fileobj, fullpath, (_, _, pytype) = find_module(module_name, [dirpath]) + except ImportError: + raise 
IOError("Cannot find config file. " + "Path maybe incorrect! : {0}".format(filepath)) + return pytype, fileobj, fullpath + + +def _get_codeobj(pyfile): + """ Returns the code object, given a python file """ + from imp import PY_COMPILED, PY_SOURCE + + result, fileobj, fullpath = _check_if_pyc(pyfile) + + # WARNING: + # fp.read() can blowup if the module is extremely large file. + # Lookout for overflow errors. + try: + data = fileobj.read() + finally: + fileobj.close() + + # This is a .pyc file. Treat accordingly. + if result is PY_COMPILED: + # .pyc format is as follows: + # 0 - 4 bytes: Magic number, which changes with each create of .pyc file. + # First 2 bytes change with each marshal of .pyc file. Last 2 bytes is "\r\n". + # 4 - 8 bytes: Datetime value, when the .py was last changed. + # 8 - EOF: Marshalled code object data. + # So to get code object, just read the 8th byte onwards till EOF, and + # UN-marshal it. + import marshal + code_obj = marshal.loads(data[8:]) + + elif result is PY_SOURCE: + # This is a .py file. + code_obj = compile(data, fullpath, 'exec') + + else: + # Unsupported extension + raise Exception("Input file is unknown format: {0}".format(fullpath)) + + # Return code object + return code_obj + +if six.PY3: + def execfile_(fname, *args): + if fname.endswith(".pyc"): + code = _get_codeobj(fname) + else: + code = compile(open(fname, 'rb').read(), fname, 'exec') + return six.exec_(code, *args) + + def bytes_to_str(b): + if isinstance(b, six.text_type): + return b + return str(b, 'latin1') + + import urllib.parse + + def unquote_to_wsgi_str(string): + return _unquote_to_bytes(string).decode('latin-1') + + _unquote_to_bytes = urllib.parse.unquote_to_bytes + +else: + def execfile_(fname, *args): + """ Overriding PY2 execfile() implementation to support .pyc files """ + if fname.endswith(".pyc"): + return six.exec_(_get_codeobj(fname), *args) + return execfile(fname, *args) + + def bytes_to_str(s): + if isinstance(s, unicode): + return s.encode('utf-8') + return s + + import urllib + unquote_to_wsgi_str = urllib.unquote + + +# The following code adapted from trollius.py33_exceptions +def _wrap_error(exc, mapping, key): + if key not in mapping: + return + new_err_cls = mapping[key] + new_err = new_err_cls(*exc.args) + + # raise a new exception with the original traceback + six.reraise(new_err_cls, new_err, + exc.__traceback__ if hasattr(exc, '__traceback__') else sys.exc_info()[2]) + +if PY33: + import builtins + + BlockingIOError = builtins.BlockingIOError + BrokenPipeError = builtins.BrokenPipeError + ChildProcessError = builtins.ChildProcessError + ConnectionRefusedError = builtins.ConnectionRefusedError + ConnectionResetError = builtins.ConnectionResetError + InterruptedError = builtins.InterruptedError + ConnectionAbortedError = builtins.ConnectionAbortedError + PermissionError = builtins.PermissionError + FileNotFoundError = builtins.FileNotFoundError + ProcessLookupError = builtins.ProcessLookupError + + def wrap_error(func, *args, **kw): + return func(*args, **kw) +else: + import errno + import select + import socket + + class BlockingIOError(OSError): + pass + + class BrokenPipeError(OSError): + pass + + class ChildProcessError(OSError): + pass + + class ConnectionRefusedError(OSError): + pass + + class InterruptedError(OSError): + pass + + class ConnectionResetError(OSError): + pass + + class ConnectionAbortedError(OSError): + pass + + class PermissionError(OSError): + pass + + class FileNotFoundError(OSError): + pass + + class ProcessLookupError(OSError): + pass 
+ + _MAP_ERRNO = { + errno.EACCES: PermissionError, + errno.EAGAIN: BlockingIOError, + errno.EALREADY: BlockingIOError, + errno.ECHILD: ChildProcessError, + errno.ECONNABORTED: ConnectionAbortedError, + errno.ECONNREFUSED: ConnectionRefusedError, + errno.ECONNRESET: ConnectionResetError, + errno.EINPROGRESS: BlockingIOError, + errno.EINTR: InterruptedError, + errno.ENOENT: FileNotFoundError, + errno.EPERM: PermissionError, + errno.EPIPE: BrokenPipeError, + errno.ESHUTDOWN: BrokenPipeError, + errno.EWOULDBLOCK: BlockingIOError, + errno.ESRCH: ProcessLookupError, + } + + def wrap_error(func, *args, **kw): + """ + Wrap socket.error, IOError, OSError, select.error to raise new specialized + exceptions of Python 3.3 like InterruptedError (PEP 3151). + """ + try: + return func(*args, **kw) + except (socket.error, IOError, OSError) as exc: + if hasattr(exc, 'winerror'): + _wrap_error(exc, _MAP_ERRNO, exc.winerror) + # _MAP_ERRNO does not contain all Windows errors. + # For some errors like "file not found", exc.errno should + # be used (ex: ENOENT). + _wrap_error(exc, _MAP_ERRNO, exc.errno) + raise + except select.error as exc: + if exc.args: + _wrap_error(exc, _MAP_ERRNO, exc.args[0]) + raise + +if PY26: + from urlparse import ( + _parse_cache, MAX_CACHE_SIZE, clear_cache, _splitnetloc, SplitResult, + scheme_chars, + ) + + def urlsplit(url, scheme='', allow_fragments=True): + """Parse a URL into 5 components: + :///?# + Return a 5-tuple: (scheme, netloc, path, query, fragment). + Note that we don't break the components up in smaller bits + (e.g. netloc is a single string) and we don't expand % escapes.""" + allow_fragments = bool(allow_fragments) + key = url, scheme, allow_fragments, type(url), type(scheme) + cached = _parse_cache.get(key, None) + if cached: + return cached + if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth + clear_cache() + netloc = query = fragment = '' + i = url.find(':') + if i > 0: + if url[:i] == 'http': # optimize the common case + scheme = url[:i].lower() + url = url[i+1:] + if url[:2] == '//': + netloc, url = _splitnetloc(url, 2) + if (('[' in netloc and ']' not in netloc) or + (']' in netloc and '[' not in netloc)): + raise ValueError("Invalid IPv6 URL") + if allow_fragments and '#' in url: + url, fragment = url.split('#', 1) + if '?' in url: + url, query = url.split('?', 1) + v = SplitResult(scheme, netloc, url, query, fragment) + _parse_cache[key] = v + return v + for c in url[:i]: + if c not in scheme_chars: + break + else: + # make sure "url" is not actually a port number (in which case + # "scheme" is really part of the path) + rest = url[i+1:] + if not rest or any(c not in '0123456789' for c in rest): + # not a port number + scheme, url = url[:i].lower(), rest + + if url[:2] == '//': + netloc, url = _splitnetloc(url, 2) + if (('[' in netloc and ']' not in netloc) or + (']' in netloc and '[' not in netloc)): + raise ValueError("Invalid IPv6 URL") + if allow_fragments and '#' in url: + url, fragment = url.split('#', 1) + if '?' 
in url: + url, query = url.split('?', 1) + v = SplitResult(scheme, netloc, url, query, fragment) + _parse_cache[key] = v + return v + +else: + from gunicorn.six.moves.urllib.parse import urlsplit + + +import inspect + +if hasattr(inspect, 'signature'): + positionals = ( + inspect.Parameter.POSITIONAL_ONLY, + inspect.Parameter.POSITIONAL_OR_KEYWORD, + ) + + def get_arity(f): + sig = inspect.signature(f) + arity = 0 + + for param in sig.parameters.values(): + if param.kind in positionals: + arity += 1 + + return arity +else: + def get_arity(f): + return len(inspect.getargspec(f)[0]) + + +try: + import html + + def html_escape(s): + return html.escape(s) +except ImportError: + import cgi + + def html_escape(s): + return cgi.escape(s, quote=True) diff --git a/venv/lib/python3.6/site-packages/gunicorn/app/__init__.py b/venv/lib/python3.6/site-packages/gunicorn/app/__init__.py new file mode 100644 index 0000000..87f0611 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/app/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. diff --git a/venv/lib/python3.6/site-packages/gunicorn/app/__pycache__/__init__.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/app/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..040397e Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/app/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/app/__pycache__/base.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/app/__pycache__/base.cpython-36.pyc new file mode 100644 index 0000000..0558959 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/app/__pycache__/base.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/app/__pycache__/pasterapp.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/app/__pycache__/pasterapp.cpython-36.pyc new file mode 100644 index 0000000..b908f39 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/app/__pycache__/pasterapp.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/app/__pycache__/wsgiapp.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/app/__pycache__/wsgiapp.cpython-36.pyc new file mode 100644 index 0000000..de34fd9 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/app/__pycache__/wsgiapp.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/app/base.py b/venv/lib/python3.6/site-packages/gunicorn/app/base.py new file mode 100644 index 0000000..e468c95 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/app/base.py @@ -0,0 +1,223 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. +from __future__ import print_function + +import os +import sys +import traceback + +from gunicorn._compat import execfile_ +from gunicorn import util +from gunicorn.arbiter import Arbiter +from gunicorn.config import Config, get_default_config_file +from gunicorn import debug + +class BaseApplication(object): + """ + An application interface for configuring and loading + the various necessities for any given web framework. 
+ """ + def __init__(self, usage=None, prog=None): + self.usage = usage + self.cfg = None + self.callable = None + self.prog = prog + self.logger = None + self.do_load_config() + + def do_load_config(self): + """ + Loads the configuration + """ + try: + self.load_default_config() + self.load_config() + except Exception as e: + print("\nError: %s" % str(e), file=sys.stderr) + sys.stderr.flush() + sys.exit(1) + + def load_default_config(self): + # init configuration + self.cfg = Config(self.usage, prog=self.prog) + + def init(self, parser, opts, args): + raise NotImplementedError + + def load(self): + raise NotImplementedError + + def load_config(self): + """ + This method is used to load the configuration from one or several input(s). + Custom Command line, configuration file. + You have to override this method in your class. + """ + raise NotImplementedError + + def reload(self): + self.do_load_config() + if self.cfg.spew: + debug.spew() + + def wsgi(self): + if self.callable is None: + self.callable = self.load() + return self.callable + + def run(self): + try: + Arbiter(self).run() + except RuntimeError as e: + print("\nError: %s\n" % e, file=sys.stderr) + sys.stderr.flush() + sys.exit(1) + + +class Application(BaseApplication): + + # 'init' and 'load' methods are implemented by WSGIApplication. + # pylint: disable=abstract-method + + def chdir(self): + # chdir to the configured path before loading, + # default is the current dir + os.chdir(self.cfg.chdir) + + # add the path to sys.path + if self.cfg.chdir not in sys.path: + sys.path.insert(0, self.cfg.chdir) + + def get_config_from_filename(self, filename): + + if not os.path.exists(filename): + raise RuntimeError("%r doesn't exist" % filename) + + cfg = { + "__builtins__": __builtins__, + "__name__": "__config__", + "__file__": filename, + "__doc__": None, + "__package__": None + } + try: + execfile_(filename, cfg, cfg) + except Exception: + print("Failed to read config file: %s" % filename, file=sys.stderr) + traceback.print_exc() + sys.stderr.flush() + sys.exit(1) + + return cfg + + def get_config_from_module_name(self, module_name): + return vars(util.import_module(module_name)) + + def load_config_from_module_name_or_filename(self, location): + """ + Loads the configuration file: the file is a python file, otherwise raise an RuntimeError + Exception or stop the process if the configuration file contains a syntax error. 
+ """ + + if location.startswith("python:"): + module_name = location[len("python:"):] + cfg = self.get_config_from_module_name(module_name) + else: + if location.startswith("file:"): + filename = location[len("file:"):] + else: + filename = location + cfg = self.get_config_from_filename(filename) + + for k, v in cfg.items(): + # Ignore unknown names + if k not in self.cfg.settings: + continue + try: + self.cfg.set(k.lower(), v) + except: + print("Invalid value for %s: %s\n" % (k, v), file=sys.stderr) + sys.stderr.flush() + raise + + return cfg + + def load_config_from_file(self, filename): + return self.load_config_from_module_name_or_filename(location=filename) + + def load_config(self): + # parse console args + parser = self.cfg.parser() + args = parser.parse_args() + + # optional settings from apps + cfg = self.init(parser, args, args.args) + + # set up import paths and follow symlinks + self.chdir() + + # Load up the any app specific configuration + if cfg: + for k, v in cfg.items(): + self.cfg.set(k.lower(), v) + + env_args = parser.parse_args(self.cfg.get_cmd_args_from_env()) + + if args.config: + self.load_config_from_file(args.config) + elif env_args.config: + self.load_config_from_file(env_args.config) + else: + default_config = get_default_config_file() + if default_config is not None: + self.load_config_from_file(default_config) + + # Load up environment configuration + for k, v in vars(env_args).items(): + if v is None: + continue + if k == "args": + continue + self.cfg.set(k.lower(), v) + + # Lastly, update the configuration with any command line settings. + for k, v in vars(args).items(): + if v is None: + continue + if k == "args": + continue + self.cfg.set(k.lower(), v) + + # current directory might be changed by the config now + # set up import paths and follow symlinks + self.chdir() + + def run(self): + if self.cfg.check_config: + try: + self.load() + except: + msg = "\nError while loading the application:\n" + print(msg, file=sys.stderr) + traceback.print_exc() + sys.stderr.flush() + sys.exit(1) + sys.exit(0) + + if self.cfg.spew: + debug.spew() + + if self.cfg.daemon: + util.daemonize(self.cfg.enable_stdio_inheritance) + + # set python paths + if self.cfg.pythonpath: + paths = self.cfg.pythonpath.split(",") + for path in paths: + pythonpath = os.path.abspath(path) + if pythonpath not in sys.path: + sys.path.insert(0, pythonpath) + + super(Application, self).run() diff --git a/venv/lib/python3.6/site-packages/gunicorn/app/pasterapp.py b/venv/lib/python3.6/site-packages/gunicorn/app/pasterapp.py new file mode 100644 index 0000000..dbcd339 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/app/pasterapp.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. 
+from __future__ import print_function + +# pylint: skip-file + +import os +import pkg_resources +import sys + +try: + import configparser as ConfigParser +except ImportError: + import ConfigParser + +from paste.deploy import loadapp, loadwsgi +SERVER = loadwsgi.SERVER + +from gunicorn.app.base import Application +from gunicorn.config import Config, get_default_config_file +from gunicorn import util + + +def _has_logging_config(paste_file): + cfg_parser = ConfigParser.ConfigParser() + cfg_parser.read([paste_file]) + return cfg_parser.has_section('loggers') + + +def paste_config(gconfig, config_url, relative_to, global_conf=None): + # add entry to pkg_resources + sys.path.insert(0, relative_to) + pkg_resources.working_set.add_entry(relative_to) + + config_url = config_url.split('#')[0] + cx = loadwsgi.loadcontext(SERVER, config_url, relative_to=relative_to, + global_conf=global_conf) + gc, lc = cx.global_conf.copy(), cx.local_conf.copy() + cfg = {} + + host, port = lc.pop('host', ''), lc.pop('port', '') + if host and port: + cfg['bind'] = '%s:%s' % (host, port) + elif host: + cfg['bind'] = host.split(',') + + cfg['default_proc_name'] = gc.get('__file__') + + # init logging configuration + config_file = config_url.split(':')[1] + if _has_logging_config(config_file): + cfg.setdefault('logconfig', config_file) + + for k, v in gc.items(): + if k not in gconfig.settings: + continue + cfg[k] = v + + for k, v in lc.items(): + if k not in gconfig.settings: + continue + cfg[k] = v + + return cfg + + +def load_pasteapp(config_url, relative_to, global_conf=None): + return loadapp(config_url, relative_to=relative_to, + global_conf=global_conf) + +class PasterBaseApplication(Application): + gcfg = None + + def app_config(self): + return paste_config(self.cfg, self.cfgurl, self.relpath, + global_conf=self.gcfg) + + def load_config(self): + super(PasterBaseApplication, self).load_config() + + # reload logging conf + if hasattr(self, "cfgfname"): + parser = ConfigParser.ConfigParser() + parser.read([self.cfgfname]) + if parser.has_section('loggers'): + from logging.config import fileConfig + config_file = os.path.abspath(self.cfgfname) + fileConfig(config_file, dict(__file__=config_file, + here=os.path.dirname(config_file))) + + +class PasterApplication(PasterBaseApplication): + + def init(self, parser, opts, args): + if len(args) != 1: + parser.error("No application name specified.") + + cwd = util.getcwd() + cfgfname = os.path.normpath(os.path.join(cwd, args[0])) + cfgfname = os.path.abspath(cfgfname) + if not os.path.exists(cfgfname): + parser.error("Config file not found: %s" % cfgfname) + + self.cfgurl = 'config:%s' % cfgfname + self.relpath = os.path.dirname(cfgfname) + self.cfgfname = cfgfname + + sys.path.insert(0, self.relpath) + pkg_resources.working_set.add_entry(self.relpath) + + return self.app_config() + + def load(self): + # chdir to the configured path before loading, + # default is the current dir + os.chdir(self.cfg.chdir) + + return load_pasteapp(self.cfgurl, self.relpath, global_conf=self.gcfg) + + +class PasterServerApplication(PasterBaseApplication): + + def __init__(self, app, gcfg=None, host="127.0.0.1", port=None, **kwargs): + # pylint: disable=super-init-not-called + self.cfg = Config() + self.gcfg = gcfg # need to hold this for app_config + self.app = app + self.callable = None + + gcfg = gcfg or {} + cfgfname = gcfg.get("__file__") + if cfgfname is not None: + self.cfgurl = 'config:%s' % cfgfname + self.relpath = os.path.dirname(cfgfname) + self.cfgfname = cfgfname + + cfg = 
kwargs.copy() + + if port and not host.startswith("unix:"): + bind = "%s:%s" % (host, port) + else: + bind = host + cfg["bind"] = bind.split(',') + + if gcfg: + for k, v in gcfg.items(): + cfg[k] = v + cfg["default_proc_name"] = cfg['__file__'] + + try: + for k, v in cfg.items(): + if k.lower() in self.cfg.settings and v is not None: + self.cfg.set(k.lower(), v) + except Exception as e: + print("\nConfig error: %s" % str(e), file=sys.stderr) + sys.stderr.flush() + sys.exit(1) + + if cfg.get("config"): + self.load_config_from_file(cfg["config"]) + else: + default_config = get_default_config_file() + if default_config is not None: + self.load_config_from_file(default_config) + + def load(self): + return self.app + + +def run(): + """\ + The ``gunicorn_paster`` command for launching Paster compatible + applications like Pylons or Turbogears2 + """ + util.warn("""This command is deprecated. + + You should now use the `--paste` option. Ex.: + + gunicorn --paste development.ini + """) + + from gunicorn.app.pasterapp import PasterApplication + PasterApplication("%(prog)s [OPTIONS] pasteconfig.ini").run() + + +def paste_server(app, gcfg=None, host="127.0.0.1", port=None, **kwargs): + """\ + A paster server. + + Then entry point in your paster ini file should looks like this: + + [server:main] + use = egg:gunicorn#main + host = 127.0.0.1 + port = 5000 + + """ + + util.warn("""This command is deprecated. + + You should now use the `--paste` option. Ex.: + + gunicorn --paste development.ini + """) + + from gunicorn.app.pasterapp import PasterServerApplication + PasterServerApplication(app, gcfg=gcfg, host=host, port=port, **kwargs).run() diff --git a/venv/lib/python3.6/site-packages/gunicorn/app/wsgiapp.py b/venv/lib/python3.6/site-packages/gunicorn/app/wsgiapp.py new file mode 100644 index 0000000..2205944 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/app/wsgiapp.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +import os + +from gunicorn.errors import ConfigError +from gunicorn.app.base import Application +from gunicorn import util + + +class WSGIApplication(Application): + def init(self, parser, opts, args): + if opts.paste: + app_name = 'main' + path = opts.paste + if '#' in path: + path, app_name = path.split('#') + path = os.path.abspath(os.path.normpath( + os.path.join(util.getcwd(), path))) + + if not os.path.exists(path): + raise ConfigError("%r not found" % path) + + # paste application, load the config + self.cfgurl = 'config:%s#%s' % (path, app_name) + self.relpath = os.path.dirname(path) + + from .pasterapp import paste_config + return paste_config(self.cfg, self.cfgurl, self.relpath) + + if len(args) < 1: + parser.error("No application module specified.") + + self.cfg.set("default_proc_name", args[0]) + self.app_uri = args[0] + + def load_wsgiapp(self): + # load the app + return util.import_app(self.app_uri) + + def load_pasteapp(self): + # load the paste app + from .pasterapp import load_pasteapp + return load_pasteapp(self.cfgurl, self.relpath, global_conf=self.cfg.paste_global_conf) + + def load(self): + if self.cfg.paste is not None: + return self.load_pasteapp() + else: + return self.load_wsgiapp() + + +def run(): + """\ + The ``gunicorn`` command line runner for launching Gunicorn with + generic WSGI applications. 
+ """ + from gunicorn.app.wsgiapp import WSGIApplication + WSGIApplication("%(prog)s [OPTIONS] [APP_MODULE]").run() + + +if __name__ == '__main__': + run() diff --git a/venv/lib/python3.6/site-packages/gunicorn/arbiter.py b/venv/lib/python3.6/site-packages/gunicorn/arbiter.py new file mode 100644 index 0000000..083ee6a --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/arbiter.py @@ -0,0 +1,646 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. +from __future__ import print_function + +import errno +import os +import random +import select +import signal +import sys +import time +import traceback + +from gunicorn.errors import HaltServer, AppImportError +from gunicorn.pidfile import Pidfile +from gunicorn import sock, systemd, util + +from gunicorn import __version__, SERVER_SOFTWARE + + +class Arbiter(object): + """ + Arbiter maintain the workers processes alive. It launches or + kills them if needed. It also manages application reloading + via SIGHUP/USR2. + """ + + # A flag indicating if a worker failed to + # to boot. If a worker process exist with + # this error code, the arbiter will terminate. + WORKER_BOOT_ERROR = 3 + + # A flag indicating if an application failed to be loaded + APP_LOAD_ERROR = 4 + + START_CTX = {} + + LISTENERS = [] + WORKERS = {} + PIPE = [] + + # I love dynamic languages + SIG_QUEUE = [] + SIGNALS = [getattr(signal, "SIG%s" % x) + for x in "HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()] + SIG_NAMES = dict( + (getattr(signal, name), name[3:].lower()) for name in dir(signal) + if name[:3] == "SIG" and name[3] != "_" + ) + + def __init__(self, app): + os.environ["SERVER_SOFTWARE"] = SERVER_SOFTWARE + + self._num_workers = None + self._last_logged_active_worker_count = None + self.log = None + + self.setup(app) + + self.pidfile = None + self.systemd = False + self.worker_age = 0 + self.reexec_pid = 0 + self.master_pid = 0 + self.master_name = "Master" + + cwd = util.getcwd() + + args = sys.argv[:] + args.insert(0, sys.executable) + + # init start context + self.START_CTX = { + "args": args, + "cwd": cwd, + 0: sys.executable + } + + def _get_num_workers(self): + return self._num_workers + + def _set_num_workers(self, value): + old_value = self._num_workers + self._num_workers = value + self.cfg.nworkers_changed(self, value, old_value) + num_workers = property(_get_num_workers, _set_num_workers) + + def setup(self, app): + self.app = app + self.cfg = app.cfg + + if self.log is None: + self.log = self.cfg.logger_class(app.cfg) + + # reopen files + if 'GUNICORN_FD' in os.environ: + self.log.reopen_files() + + self.worker_class = self.cfg.worker_class + self.address = self.cfg.address + self.num_workers = self.cfg.workers + self.timeout = self.cfg.timeout + self.proc_name = self.cfg.proc_name + + self.log.debug('Current configuration:\n{0}'.format( + '\n'.join( + ' {0}: {1}'.format(config, value.value) + for config, value + in sorted(self.cfg.settings.items(), + key=lambda setting: setting[1])))) + + # set enviroment' variables + if self.cfg.env: + for k, v in self.cfg.env.items(): + os.environ[k] = v + + if self.cfg.preload_app: + self.app.wsgi() + + def start(self): + """\ + Initialize the arbiter. Start listening and set pidfile if needed. 
+ """ + self.log.info("Starting gunicorn %s", __version__) + + if 'GUNICORN_PID' in os.environ: + self.master_pid = int(os.environ.get('GUNICORN_PID')) + self.proc_name = self.proc_name + ".2" + self.master_name = "Master.2" + + self.pid = os.getpid() + if self.cfg.pidfile is not None: + pidname = self.cfg.pidfile + if self.master_pid != 0: + pidname += ".2" + self.pidfile = Pidfile(pidname) + self.pidfile.create(self.pid) + self.cfg.on_starting(self) + + self.init_signals() + + if not self.LISTENERS: + fds = None + listen_fds = systemd.listen_fds() + if listen_fds: + self.systemd = True + fds = range(systemd.SD_LISTEN_FDS_START, + systemd.SD_LISTEN_FDS_START + listen_fds) + + elif self.master_pid: + fds = [] + for fd in os.environ.pop('GUNICORN_FD').split(','): + fds.append(int(fd)) + + self.LISTENERS = sock.create_sockets(self.cfg, self.log, fds) + + listeners_str = ",".join([str(l) for l in self.LISTENERS]) + self.log.debug("Arbiter booted") + self.log.info("Listening at: %s (%s)", listeners_str, self.pid) + self.log.info("Using worker: %s", self.cfg.worker_class_str) + + # check worker class requirements + if hasattr(self.worker_class, "check_config"): + self.worker_class.check_config(self.cfg, self.log) + + self.cfg.when_ready(self) + + def init_signals(self): + """\ + Initialize master signal handling. Most of the signals + are queued. Child signals only wake up the master. + """ + # close old PIPE + for p in self.PIPE: + os.close(p) + + # initialize the pipe + self.PIPE = pair = os.pipe() + for p in pair: + util.set_non_blocking(p) + util.close_on_exec(p) + + self.log.close_on_exec() + + # initialize all signals + for s in self.SIGNALS: + signal.signal(s, self.signal) + signal.signal(signal.SIGCHLD, self.handle_chld) + + def signal(self, sig, frame): + if len(self.SIG_QUEUE) < 5: + self.SIG_QUEUE.append(sig) + self.wakeup() + + def run(self): + "Main master loop." + self.start() + util._setproctitle("master [%s]" % self.proc_name) + + try: + self.manage_workers() + + while True: + self.maybe_promote_master() + + sig = self.SIG_QUEUE.pop(0) if self.SIG_QUEUE else None + if sig is None: + self.sleep() + self.murder_workers() + self.manage_workers() + continue + + if sig not in self.SIG_NAMES: + self.log.info("Ignoring unknown signal: %s", sig) + continue + + signame = self.SIG_NAMES.get(sig) + handler = getattr(self, "handle_%s" % signame, None) + if not handler: + self.log.error("Unhandled signal: %s", signame) + continue + self.log.info("Handling signal: %s", signame) + handler() + self.wakeup() + except StopIteration: + self.halt() + except KeyboardInterrupt: + self.halt() + except HaltServer as inst: + self.halt(reason=inst.reason, exit_status=inst.exit_status) + except SystemExit: + raise + except Exception: + self.log.info("Unhandled exception in main loop", + exc_info=True) + self.stop(False) + if self.pidfile is not None: + self.pidfile.unlink() + sys.exit(-1) + + def handle_chld(self, sig, frame): + "SIGCHLD handling" + self.reap_workers() + self.wakeup() + + def handle_hup(self): + """\ + HUP handling. 
+ - Reload configuration + - Start the new worker processes with a new configuration + - Gracefully shutdown the old worker processes + """ + self.log.info("Hang up: %s", self.master_name) + self.reload() + + def handle_term(self): + "SIGTERM handling" + raise StopIteration + + def handle_int(self): + "SIGINT handling" + self.stop(False) + raise StopIteration + + def handle_quit(self): + "SIGQUIT handling" + self.stop(False) + raise StopIteration + + def handle_ttin(self): + """\ + SIGTTIN handling. + Increases the number of workers by one. + """ + self.num_workers += 1 + self.manage_workers() + + def handle_ttou(self): + """\ + SIGTTOU handling. + Decreases the number of workers by one. + """ + if self.num_workers <= 1: + return + self.num_workers -= 1 + self.manage_workers() + + def handle_usr1(self): + """\ + SIGUSR1 handling. + Kill all workers by sending them a SIGUSR1 + """ + self.log.reopen_files() + self.kill_workers(signal.SIGUSR1) + + def handle_usr2(self): + """\ + SIGUSR2 handling. + Creates a new master/worker set as a slave of the current + master without affecting old workers. Use this to do live + deployment with the ability to backout a change. + """ + self.reexec() + + def handle_winch(self): + """SIGWINCH handling""" + if self.cfg.daemon: + self.log.info("graceful stop of workers") + self.num_workers = 0 + self.kill_workers(signal.SIGTERM) + else: + self.log.debug("SIGWINCH ignored. Not daemonized") + + def maybe_promote_master(self): + if self.master_pid == 0: + return + + if self.master_pid != os.getppid(): + self.log.info("Master has been promoted.") + # reset master infos + self.master_name = "Master" + self.master_pid = 0 + self.proc_name = self.cfg.proc_name + del os.environ['GUNICORN_PID'] + # rename the pidfile + if self.pidfile is not None: + self.pidfile.rename(self.cfg.pidfile) + # reset proctitle + util._setproctitle("master [%s]" % self.proc_name) + + def wakeup(self): + """\ + Wake up the arbiter by writing to the PIPE + """ + try: + os.write(self.PIPE[1], b'.') + except IOError as e: + if e.errno not in [errno.EAGAIN, errno.EINTR]: + raise + + def halt(self, reason=None, exit_status=0): + """ halt arbiter """ + self.stop() + self.log.info("Shutting down: %s", self.master_name) + if reason is not None: + self.log.info("Reason: %s", reason) + if self.pidfile is not None: + self.pidfile.unlink() + self.cfg.on_exit(self) + sys.exit(exit_status) + + def sleep(self): + """\ + Sleep until PIPE is readable or we timeout. + A readable PIPE means a signal occurred. + """ + try: + ready = select.select([self.PIPE[0]], [], [], 1.0) + if not ready[0]: + return + while os.read(self.PIPE[0], 1): + pass + except (select.error, OSError) as e: + # TODO: select.error is a subclass of OSError since Python 3.3. + error_number = getattr(e, 'errno', e.args[0]) + if error_number not in [errno.EAGAIN, errno.EINTR]: + raise + except KeyboardInterrupt: + sys.exit() + + def stop(self, graceful=True): + """\ + Stop workers + + :attr graceful: boolean, If True (the default) workers will be + killed gracefully (ie. 
trying to wait for the current connection) + """ + + unlink = self.reexec_pid == self.master_pid == 0 and not self.systemd + sock.close_sockets(self.LISTENERS, unlink) + + self.LISTENERS = [] + sig = signal.SIGTERM + if not graceful: + sig = signal.SIGQUIT + limit = time.time() + self.cfg.graceful_timeout + # instruct the workers to exit + self.kill_workers(sig) + # wait until the graceful timeout + while self.WORKERS and time.time() < limit: + time.sleep(0.1) + + self.kill_workers(signal.SIGKILL) + + def reexec(self): + """\ + Relaunch the master and workers. + """ + if self.reexec_pid != 0: + self.log.warning("USR2 signal ignored. Child exists.") + return + + if self.master_pid != 0: + self.log.warning("USR2 signal ignored. Parent exists.") + return + + master_pid = os.getpid() + self.reexec_pid = os.fork() + if self.reexec_pid != 0: + return + + self.cfg.pre_exec(self) + + environ = self.cfg.env_orig.copy() + environ['GUNICORN_PID'] = str(master_pid) + + if self.systemd: + environ['LISTEN_PID'] = str(os.getpid()) + environ['LISTEN_FDS'] = str(len(self.LISTENERS)) + else: + environ['GUNICORN_FD'] = ','.join( + str(l.fileno()) for l in self.LISTENERS) + + os.chdir(self.START_CTX['cwd']) + + # exec the process using the original environment + os.execvpe(self.START_CTX[0], self.START_CTX['args'], environ) + + def reload(self): + old_address = self.cfg.address + + # reset old environment + for k in self.cfg.env: + if k in self.cfg.env_orig: + # reset the key to the value it had before + # we launched gunicorn + os.environ[k] = self.cfg.env_orig[k] + else: + # delete the value set by gunicorn + try: + del os.environ[k] + except KeyError: + pass + + # reload conf + self.app.reload() + self.setup(self.app) + + # reopen log files + self.log.reopen_files() + + # do we need to change listener ? + if old_address != self.cfg.address: + # close all listeners + for l in self.LISTENERS: + l.close() + # init new listeners + self.LISTENERS = sock.create_sockets(self.cfg, self.log) + listeners_str = ",".join([str(l) for l in self.LISTENERS]) + self.log.info("Listening at: %s", listeners_str) + + # do some actions on reload + self.cfg.on_reload(self) + + # unlink pidfile + if self.pidfile is not None: + self.pidfile.unlink() + + # create new pidfile + if self.cfg.pidfile is not None: + self.pidfile = Pidfile(self.cfg.pidfile) + self.pidfile.create(self.pid) + + # set new proc_name + util._setproctitle("master [%s]" % self.proc_name) + + # spawn new workers + for _ in range(self.cfg.workers): + self.spawn_worker() + + # manage workers + self.manage_workers() + + def murder_workers(self): + """\ + Kill unused/idle workers + """ + if not self.timeout: + return + workers = list(self.WORKERS.items()) + for (pid, worker) in workers: + try: + if time.time() - worker.tmp.last_update() <= self.timeout: + continue + except (OSError, ValueError): + continue + + if not worker.aborted: + self.log.critical("WORKER TIMEOUT (pid:%s)", pid) + worker.aborted = True + self.kill_worker(pid, signal.SIGABRT) + else: + self.kill_worker(pid, signal.SIGKILL) + + def reap_workers(self): + """\ + Reap workers to avoid zombie processes + """ + try: + while True: + wpid, status = os.waitpid(-1, os.WNOHANG) + if not wpid: + break + if self.reexec_pid == wpid: + self.reexec_pid = 0 + else: + # A worker was terminated. If the termination reason was + # that it could not boot, we'll shut it down to avoid + # infinite start/stop cycles. + exitcode = status >> 8 + if exitcode == self.WORKER_BOOT_ERROR: + reason = "Worker failed to boot." 
+ raise HaltServer(reason, self.WORKER_BOOT_ERROR) + if exitcode == self.APP_LOAD_ERROR: + reason = "App failed to load." + raise HaltServer(reason, self.APP_LOAD_ERROR) + + worker = self.WORKERS.pop(wpid, None) + if not worker: + continue + worker.tmp.close() + self.cfg.child_exit(self, worker) + except OSError as e: + if e.errno != errno.ECHILD: + raise + + def manage_workers(self): + """\ + Maintain the number of workers by spawning or killing + as required. + """ + if len(self.WORKERS.keys()) < self.num_workers: + self.spawn_workers() + + workers = self.WORKERS.items() + workers = sorted(workers, key=lambda w: w[1].age) + while len(workers) > self.num_workers: + (pid, _) = workers.pop(0) + self.kill_worker(pid, signal.SIGTERM) + + active_worker_count = len(workers) + if self._last_logged_active_worker_count != active_worker_count: + self._last_logged_active_worker_count = active_worker_count + self.log.debug("{0} workers".format(active_worker_count), + extra={"metric": "gunicorn.workers", + "value": active_worker_count, + "mtype": "gauge"}) + + def spawn_worker(self): + self.worker_age += 1 + worker = self.worker_class(self.worker_age, self.pid, self.LISTENERS, + self.app, self.timeout / 2.0, + self.cfg, self.log) + self.cfg.pre_fork(self, worker) + pid = os.fork() + if pid != 0: + worker.pid = pid + self.WORKERS[pid] = worker + return pid + + # Do not inherit the temporary files of other workers + for sibling in self.WORKERS.values(): + sibling.tmp.close() + + # Process Child + worker.pid = os.getpid() + try: + util._setproctitle("worker [%s]" % self.proc_name) + self.log.info("Booting worker with pid: %s", worker.pid) + self.cfg.post_fork(self, worker) + worker.init_process() + sys.exit(0) + except SystemExit: + raise + except AppImportError as e: + self.log.debug("Exception while loading the application", + exc_info=True) + print("%s" % e, file=sys.stderr) + sys.stderr.flush() + sys.exit(self.APP_LOAD_ERROR) + except: + self.log.exception("Exception in worker process") + if not worker.booted: + sys.exit(self.WORKER_BOOT_ERROR) + sys.exit(-1) + finally: + self.log.info("Worker exiting (pid: %s)", worker.pid) + try: + worker.tmp.close() + self.cfg.worker_exit(self, worker) + except: + self.log.warning("Exception during worker exit:\n%s", + traceback.format_exc()) + + def spawn_workers(self): + """\ + Spawn new workers as needed. + + This is where a worker process leaves the main loop + of the master process. + """ + + for _ in range(self.num_workers - len(self.WORKERS.keys())): + self.spawn_worker() + time.sleep(0.1 * random.random()) + + def kill_workers(self, sig): + """\ + Kill all workers with the signal `sig` + :attr sig: `signal.SIG*` value + """ + worker_pids = list(self.WORKERS.keys()) + for pid in worker_pids: + self.kill_worker(pid, sig) + + def kill_worker(self, pid, sig): + """\ + Kill a worker + + :attr pid: int, worker pid + :attr sig: `signal.SIG*` value + """ + try: + os.kill(pid, sig) + except OSError as e: + if e.errno == errno.ESRCH: + try: + worker = self.WORKERS.pop(pid) + worker.tmp.close() + self.cfg.worker_exit(self, worker) + return + except (KeyError, OSError): + return + raise diff --git a/venv/lib/python3.6/site-packages/gunicorn/argparse_compat.py b/venv/lib/python3.6/site-packages/gunicorn/argparse_compat.py new file mode 100644 index 0000000..32d948c --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/argparse_compat.py @@ -0,0 +1,2362 @@ +# Author: Steven J. Bethard . 
+ +"""Command-line parsing library + +This module is an optparse-inspired command-line parsing library that: + + - handles both optional and positional arguments + - produces highly informative usage messages + - supports parsers that dispatch to sub-parsers + +The following is a simple usage example that sums integers from the +command-line and writes the result to a file:: + + parser = argparse.ArgumentParser( + description='sum the integers at the command line') + parser.add_argument( + 'integers', metavar='int', nargs='+', type=int, + help='an integer to be summed') + parser.add_argument( + '--log', default=sys.stdout, type=argparse.FileType('w'), + help='the file where the sum should be written') + args = parser.parse_args() + args.log.write('%s' % sum(args.integers)) + args.log.close() + +The module contains the following public classes: + + - ArgumentParser -- The main entry point for command-line parsing. As the + example above shows, the add_argument() method is used to populate + the parser with actions for optional and positional arguments. Then + the parse_args() method is invoked to convert the args at the + command-line into an object with attributes. + + - ArgumentError -- The exception raised by ArgumentParser objects when + there are errors with the parser's actions. Errors raised while + parsing the command-line are caught by ArgumentParser and emitted + as command-line messages. + + - FileType -- A factory for defining types of files to be created. As the + example above shows, instances of FileType are typically passed as + the type= argument of add_argument() calls. + + - Action -- The base class for parser actions. Typically actions are + selected by passing strings like 'store_true' or 'append_const' to + the action= argument of add_argument(). However, for greater + customization of ArgumentParser actions, subclasses of Action may + be defined and passed as the action= argument. + + - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, + ArgumentDefaultsHelpFormatter -- Formatter classes which + may be passed as the formatter_class= argument to the + ArgumentParser constructor. HelpFormatter is the default, + RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser + not to change the formatting for help text, and + ArgumentDefaultsHelpFormatter adds information about argument defaults + to the help. + +All other classes in this module are considered implementation details. +(Also note that HelpFormatter and RawDescriptionHelpFormatter are only +considered public as object names -- the API of the formatter objects is +still considered an implementation detail.) 
+""" + +__version__ = '1.2.1' +__all__ = [ + 'ArgumentParser', + 'ArgumentError', + 'ArgumentTypeError', + 'FileType', + 'HelpFormatter', + 'ArgumentDefaultsHelpFormatter', + 'RawDescriptionHelpFormatter', + 'RawTextHelpFormatter', + 'Namespace', + 'Action', + 'ONE_OR_MORE', + 'OPTIONAL', + 'PARSER', + 'REMAINDER', + 'SUPPRESS', + 'ZERO_OR_MORE', +] + + +import copy as _copy +import os as _os +import re as _re +import sys as _sys +import textwrap as _textwrap + +from gettext import gettext as _ + +try: + set +except NameError: + # for python < 2.4 compatibility (sets module is there since 2.3): + from sets import Set as set + +try: + basestring +except NameError: + basestring = str + +try: + sorted +except NameError: + # for python < 2.4 compatibility: + def sorted(iterable, reverse=False): + result = list(iterable) + result.sort() + if reverse: + result.reverse() + return result + + +def _callable(obj): + return hasattr(obj, '__call__') or hasattr(obj, '__bases__') + + +SUPPRESS = '==SUPPRESS==' + +OPTIONAL = '?' +ZERO_OR_MORE = '*' +ONE_OR_MORE = '+' +PARSER = 'A...' +REMAINDER = '...' +_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args' + +# ============================= +# Utility functions and classes +# ============================= + +class _AttributeHolder(object): + """Abstract base class that provides __repr__. + + The __repr__ method returns a string in the format:: + ClassName(attr=name, attr=name, ...) + The attributes are determined either by a class-level attribute, + '_kwarg_names', or by inspecting the instance __dict__. + """ + + def __repr__(self): + type_name = type(self).__name__ + arg_strings = [] + for arg in self._get_args(): + arg_strings.append(repr(arg)) + for name, value in self._get_kwargs(): + arg_strings.append('%s=%r' % (name, value)) + return '%s(%s)' % (type_name, ', '.join(arg_strings)) + + def _get_kwargs(self): + return sorted(self.__dict__.items()) + + def _get_args(self): + return [] + + +def _ensure_value(namespace, name, value): + if getattr(namespace, name, None) is None: + setattr(namespace, name, value) + return getattr(namespace, name) + + +# =============== +# Formatting Help +# =============== + +class HelpFormatter(object): + """Formatter for generating usage messages and argument help strings. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def __init__(self, + prog, + indent_increment=2, + max_help_position=24, + width=None): + + # default setting for width + if width is None: + try: + width = int(_os.environ['COLUMNS']) + except (KeyError, ValueError): + width = 80 + width -= 2 + + self._prog = prog + self._indent_increment = indent_increment + self._max_help_position = max_help_position + self._width = width + + self._current_indent = 0 + self._level = 0 + self._action_max_length = 0 + + self._root_section = self._Section(self, None) + self._current_section = self._root_section + + self._whitespace_matcher = _re.compile(r'\s+') + self._long_break_matcher = _re.compile(r'\n\n\n+') + + # =============================== + # Section and indentation methods + # =============================== + def _indent(self): + self._current_indent += self._indent_increment + self._level += 1 + + def _dedent(self): + self._current_indent -= self._indent_increment + assert self._current_indent >= 0, 'Indent decreased below 0.' 
+ self._level -= 1 + + class _Section(object): + + def __init__(self, formatter, parent, heading=None): + self.formatter = formatter + self.parent = parent + self.heading = heading + self.items = [] + + def format_help(self): + # format the indented section + if self.parent is not None: + self.formatter._indent() + join = self.formatter._join_parts + for func, args in self.items: + func(*args) + item_help = join([func(*args) for func, args in self.items]) + if self.parent is not None: + self.formatter._dedent() + + # return nothing if the section was empty + if not item_help: + return '' + + # add the heading if the section was non-empty + if self.heading is not SUPPRESS and self.heading is not None: + current_indent = self.formatter._current_indent + heading = '%*s%s:\n' % (current_indent, '', self.heading) + else: + heading = '' + + # join the section-initial newline, the heading and the help + return join(['\n', heading, item_help, '\n']) + + def _add_item(self, func, args): + self._current_section.items.append((func, args)) + + # ======================== + # Message building methods + # ======================== + def start_section(self, heading): + self._indent() + section = self._Section(self, self._current_section, heading) + self._add_item(section.format_help, []) + self._current_section = section + + def end_section(self): + self._current_section = self._current_section.parent + self._dedent() + + def add_text(self, text): + if text is not SUPPRESS and text is not None: + self._add_item(self._format_text, [text]) + + def add_usage(self, usage, actions, groups, prefix=None): + if usage is not SUPPRESS: + args = usage, actions, groups, prefix + self._add_item(self._format_usage, args) + + def add_argument(self, action): + if action.help is not SUPPRESS: + + # find all invocations + get_invocation = self._format_action_invocation + invocations = [get_invocation(action)] + for subaction in self._iter_indented_subactions(action): + invocations.append(get_invocation(subaction)) + + # update the maximum item length + invocation_length = max([len(s) for s in invocations]) + action_length = invocation_length + self._current_indent + self._action_max_length = max(self._action_max_length, + action_length) + + # add the item to the list + self._add_item(self._format_action, [action]) + + def add_arguments(self, actions): + for action in actions: + self.add_argument(action) + + # ======================= + # Help-formatting methods + # ======================= + def format_help(self): + help = self._root_section.format_help() + if help: + help = self._long_break_matcher.sub('\n\n', help) + help = help.strip('\n') + '\n' + return help + + def _join_parts(self, part_strings): + return ''.join([part + for part in part_strings + if part and part is not SUPPRESS]) + + def _format_usage(self, usage, actions, groups, prefix): + if prefix is None: + prefix = _('usage: ') + + # if usage is specified, use that + if usage is not None: + usage = usage % dict(prog=self._prog) + + # if no optionals or positionals are available, usage is just prog + elif usage is None and not actions: + usage = '%(prog)s' % dict(prog=self._prog) + + # if optionals and positionals are available, calculate usage + elif usage is None: + prog = '%(prog)s' % dict(prog=self._prog) + + # split optionals from positionals + optionals = [] + positionals = [] + for action in actions: + if action.option_strings: + optionals.append(action) + else: + positionals.append(action) + + # build full usage string + format = 
self._format_actions_usage + action_usage = format(optionals + positionals, groups) + usage = ' '.join([s for s in [prog, action_usage] if s]) + + # wrap the usage parts if it's too long + text_width = self._width - self._current_indent + if len(prefix) + len(usage) > text_width: + + # break usage into wrappable parts + part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' + opt_usage = format(optionals, groups) + pos_usage = format(positionals, groups) + opt_parts = _re.findall(part_regexp, opt_usage) + pos_parts = _re.findall(part_regexp, pos_usage) + assert ' '.join(opt_parts) == opt_usage + assert ' '.join(pos_parts) == pos_usage + + # helper for wrapping lines + def get_lines(parts, indent, prefix=None): + lines = [] + line = [] + if prefix is not None: + line_len = len(prefix) - 1 + else: + line_len = len(indent) - 1 + for part in parts: + if line_len + 1 + len(part) > text_width: + lines.append(indent + ' '.join(line)) + line = [] + line_len = len(indent) - 1 + line.append(part) + line_len += len(part) + 1 + if line: + lines.append(indent + ' '.join(line)) + if prefix is not None: + lines[0] = lines[0][len(indent):] + return lines + + # if prog is short, follow it with optionals or positionals + if len(prefix) + len(prog) <= 0.75 * text_width: + indent = ' ' * (len(prefix) + len(prog) + 1) + if opt_parts: + lines = get_lines([prog] + opt_parts, indent, prefix) + lines.extend(get_lines(pos_parts, indent)) + elif pos_parts: + lines = get_lines([prog] + pos_parts, indent, prefix) + else: + lines = [prog] + + # if prog is long, put it on its own line + else: + indent = ' ' * len(prefix) + parts = opt_parts + pos_parts + lines = get_lines(parts, indent) + if len(lines) > 1: + lines = [] + lines.extend(get_lines(opt_parts, indent)) + lines.extend(get_lines(pos_parts, indent)) + lines = [prog] + lines + + # join lines into usage + usage = '\n'.join(lines) + + # prefix with 'usage:' + return '%s%s\n\n' % (prefix, usage) + + def _format_actions_usage(self, actions, groups): + # find group indices and identify actions in groups + group_actions = set() + inserts = {} + for group in groups: + try: + start = actions.index(group._group_actions[0]) + except ValueError: + continue + else: + end = start + len(group._group_actions) + if actions[start:end] == group._group_actions: + for action in group._group_actions: + group_actions.add(action) + if not group.required: + if start in inserts: + inserts[start] += ' [' + else: + inserts[start] = '[' + inserts[end] = ']' + else: + if start in inserts: + inserts[start] += ' (' + else: + inserts[start] = '(' + inserts[end] = ')' + for i in range(start + 1, end): + inserts[i] = '|' + + # collect all actions format strings + parts = [] + for i, action in enumerate(actions): + + # suppressed arguments are marked with None + # remove | separators for suppressed arguments + if action.help is SUPPRESS: + parts.append(None) + if inserts.get(i) == '|': + inserts.pop(i) + elif inserts.get(i + 1) == '|': + inserts.pop(i + 1) + + # produce all arg strings + elif not action.option_strings: + part = self._format_args(action, action.dest) + + # if it's in a group, strip the outer [] + if action in group_actions: + if part[0] == '[' and part[-1] == ']': + part = part[1:-1] + + # add the action string to the list + parts.append(part) + + # produce the first way to invoke the option in brackets + else: + option_string = action.option_strings[0] + + # if the Optional doesn't take a value, format is: + # -s or --long + if action.nargs == 0: + part = '%s' % option_string + + # if the 
Optional takes a value, format is: + # -s ARGS or --long ARGS + else: + default = action.dest.upper() + args_string = self._format_args(action, default) + part = '%s %s' % (option_string, args_string) + + # make it look optional if it's not required or in a group + if not action.required and action not in group_actions: + part = '[%s]' % part + + # add the action string to the list + parts.append(part) + + # insert things at the necessary indices + for i in sorted(inserts, reverse=True): + parts[i:i] = [inserts[i]] + + # join all the action items with spaces + text = ' '.join([item for item in parts if item is not None]) + + # clean up separators for mutually exclusive groups + open = r'[\[(]' + close = r'[\])]' + text = _re.sub(r'(%s) ' % open, r'\1', text) + text = _re.sub(r' (%s)' % close, r'\1', text) + text = _re.sub(r'%s *%s' % (open, close), r'', text) + text = _re.sub(r'\(([^|]*)\)', r'\1', text) + text = text.strip() + + # return the text + return text + + def _format_text(self, text): + if '%(prog)' in text: + text = text % dict(prog=self._prog) + text_width = self._width - self._current_indent + indent = ' ' * self._current_indent + return self._fill_text(text, text_width, indent) + '\n\n' + + def _format_action(self, action): + # determine the required width and the entry label + help_position = min(self._action_max_length + 2, + self._max_help_position) + help_width = self._width - help_position + action_width = help_position - self._current_indent - 2 + action_header = self._format_action_invocation(action) + + # no help; start on same line and add a final newline + if not action.help: + tup = self._current_indent, '', action_header + action_header = '%*s%s\n' % tup + + # short action name; start on the same line and pad two spaces + elif len(action_header) <= action_width: + tup = self._current_indent, '', action_width, action_header + action_header = '%*s%-*s ' % tup + indent_first = 0 + + # long action name; start on the next line + else: + tup = self._current_indent, '', action_header + action_header = '%*s%s\n' % tup + indent_first = help_position + + # collect the pieces of the action help + parts = [action_header] + + # if there was help for the action, add lines of help text + if action.help: + help_text = self._expand_help(action) + help_lines = self._split_lines(help_text, help_width) + parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) + for line in help_lines[1:]: + parts.append('%*s%s\n' % (help_position, '', line)) + + # or add a newline if the description doesn't end with one + elif not action_header.endswith('\n'): + parts.append('\n') + + # if there are any sub-actions, add their help as well + for subaction in self._iter_indented_subactions(action): + parts.append(self._format_action(subaction)) + + # return a single string + return self._join_parts(parts) + + def _format_action_invocation(self, action): + if not action.option_strings: + metavar, = self._metavar_formatter(action, action.dest)(1) + return metavar + + else: + parts = [] + + # if the Optional doesn't take a value, format is: + # -s, --long + if action.nargs == 0: + parts.extend(action.option_strings) + + # if the Optional takes a value, format is: + # -s ARGS, --long ARGS + else: + default = action.dest.upper() + args_string = self._format_args(action, default) + for option_string in action.option_strings: + parts.append('%s %s' % (option_string, args_string)) + + return ', '.join(parts) + + def _metavar_formatter(self, action, default_metavar): + if action.metavar is not None: +
result = action.metavar + elif action.choices is not None: + choice_strs = [str(choice) for choice in action.choices] + result = '{%s}' % ','.join(choice_strs) + else: + result = default_metavar + + def format(tuple_size): + if isinstance(result, tuple): + return result + else: + return (result, ) * tuple_size + return format + + def _format_args(self, action, default_metavar): + get_metavar = self._metavar_formatter(action, default_metavar) + if action.nargs is None: + result = '%s' % get_metavar(1) + elif action.nargs == OPTIONAL: + result = '[%s]' % get_metavar(1) + elif action.nargs == ZERO_OR_MORE: + result = '[%s [%s ...]]' % get_metavar(2) + elif action.nargs == ONE_OR_MORE: + result = '%s [%s ...]' % get_metavar(2) + elif action.nargs == REMAINDER: + result = '...' + elif action.nargs == PARSER: + result = '%s ...' % get_metavar(1) + else: + formats = ['%s' for _ in range(action.nargs)] + result = ' '.join(formats) % get_metavar(action.nargs) + return result + + def _expand_help(self, action): + params = dict(vars(action), prog=self._prog) + for name in list(params): + if params[name] is SUPPRESS: + del params[name] + for name in list(params): + if hasattr(params[name], '__name__'): + params[name] = params[name].__name__ + if params.get('choices') is not None: + choices_str = ', '.join([str(c) for c in params['choices']]) + params['choices'] = choices_str + return self._get_help_string(action) % params + + def _iter_indented_subactions(self, action): + try: + get_subactions = action._get_subactions + except AttributeError: + pass + else: + self._indent() + for subaction in get_subactions(): + yield subaction + self._dedent() + + def _split_lines(self, text, width): + text = self._whitespace_matcher.sub(' ', text).strip() + return _textwrap.wrap(text, width) + + def _fill_text(self, text, width, indent): + text = self._whitespace_matcher.sub(' ', text).strip() + return _textwrap.fill(text, width, initial_indent=indent, + subsequent_indent=indent) + + def _get_help_string(self, action): + return action.help + + +class RawDescriptionHelpFormatter(HelpFormatter): + """Help message formatter which retains any formatting in descriptions. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def _fill_text(self, text, width, indent): + return ''.join([indent + line for line in text.splitlines(True)]) + + +class RawTextHelpFormatter(RawDescriptionHelpFormatter): + """Help message formatter which retains formatting of all help text. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def _split_lines(self, text, width): + return text.splitlines() + + +class ArgumentDefaultsHelpFormatter(HelpFormatter): + """Help message formatter which adds default values to argument help. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. 
+ """ + + def _get_help_string(self, action): + help = action.help + if '%(default)' not in action.help: + if action.default is not SUPPRESS: + defaulting_nargs = [OPTIONAL, ZERO_OR_MORE] + if action.option_strings or action.nargs in defaulting_nargs: + help += ' (default: %(default)s)' + return help + + +# ===================== +# Options and Arguments +# ===================== + +def _get_action_name(argument): + if argument is None: + return None + elif argument.option_strings: + return '/'.join(argument.option_strings) + elif argument.metavar not in (None, SUPPRESS): + return argument.metavar + elif argument.dest not in (None, SUPPRESS): + return argument.dest + else: + return None + + +class ArgumentError(Exception): + """An error from creating or using an argument (optional or positional). + + The string value of this exception is the message, augmented with + information about the argument that caused it. + """ + + def __init__(self, argument, message): + self.argument_name = _get_action_name(argument) + self.message = message + + def __str__(self): + if self.argument_name is None: + format = '%(message)s' + else: + format = 'argument %(argument_name)s: %(message)s' + return format % dict(message=self.message, + argument_name=self.argument_name) + + +class ArgumentTypeError(Exception): + """An error from trying to convert a command line string to a type.""" + pass + + +# ============== +# Action classes +# ============== + +class Action(_AttributeHolder): + """Information about how to convert command line strings to Python objects. + + Action objects are used by an ArgumentParser to represent the information + needed to parse a single argument from one or more strings from the + command line. The keyword arguments to the Action constructor are also + all attributes of Action instances. + + Keyword Arguments: + + - option_strings -- A list of command-line option strings which + should be associated with this action. + + - dest -- The name of the attribute to hold the created object(s) + + - nargs -- The number of command-line arguments that should be + consumed. By default, one argument will be consumed and a single + value will be produced. Other values include: + - N (an integer) consumes N arguments (and produces a list) + - '?' consumes zero or one arguments + - '*' consumes zero or more arguments (and produces a list) + - '+' consumes one or more arguments (and produces a list) + Note that the difference between the default and nargs=1 is that + with the default, a single value will be produced, while with + nargs=1, a list containing a single value will be produced. + + - const -- The value to be produced if the option is specified and the + option uses an action that takes no values. + + - default -- The value to be produced if the option is not specified. + + - type -- The type which the command-line arguments should be converted + to, should be one of 'string', 'int', 'float', 'complex' or a + callable object that accepts a single string argument. If None, + 'string' is assumed. + + - choices -- A container of values that should be allowed. If not None, + after a command-line argument has been converted to the appropriate + type, an exception will be raised if it is not a member of this + collection. + + - required -- True if the action must always be specified at the + command line. This is only meaningful for optional command-line + arguments. + + - help -- The help string describing the argument. 
+ + - metavar -- The name to be used for the option's argument with the + help string. If None, the 'dest' value will be used as the name. + """ + + def __init__(self, + option_strings, + dest, + nargs=None, + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None): + self.option_strings = option_strings + self.dest = dest + self.nargs = nargs + self.const = const + self.default = default + self.type = type + self.choices = choices + self.required = required + self.help = help + self.metavar = metavar + + def _get_kwargs(self): + names = [ + 'option_strings', + 'dest', + 'nargs', + 'const', + 'default', + 'type', + 'choices', + 'help', + 'metavar', + ] + return [(name, getattr(self, name)) for name in names] + + def __call__(self, parser, namespace, values, option_string=None): + raise NotImplementedError(_('.__call__() not defined')) + + +class _StoreAction(Action): + + def __init__(self, + option_strings, + dest, + nargs=None, + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None): + if nargs == 0: + raise ValueError('nargs for store actions must be > 0; if you ' + 'have nothing to store, actions such as store ' + 'true or store const may be more appropriate') + if const is not None and nargs != OPTIONAL: + raise ValueError('nargs must be %r to supply const' % OPTIONAL) + super(_StoreAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=nargs, + const=const, + default=default, + type=type, + choices=choices, + required=required, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, values) + + +class _StoreConstAction(Action): + + def __init__(self, + option_strings, + dest, + const, + default=None, + required=False, + help=None, + metavar=None): + super(_StoreConstAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + const=const, + default=default, + required=required, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + +class _StoreTrueAction(_StoreConstAction): + + def __init__(self, + option_strings, + dest, + default=False, + required=False, + help=None): + super(_StoreTrueAction, self).__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + required=required, + help=help) + + +class _StoreFalseAction(_StoreConstAction): + + def __init__(self, + option_strings, + dest, + default=True, + required=False, + help=None): + super(_StoreFalseAction, self).__init__( + option_strings=option_strings, + dest=dest, + const=False, + default=default, + required=required, + help=help) + + +class _AppendAction(Action): + + def __init__(self, + option_strings, + dest, + nargs=None, + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None): + if nargs == 0: + raise ValueError('nargs for append actions must be > 0; if arg ' + 'strings are not supplying the value to append, ' + 'the append const action may be more appropriate') + if const is not None and nargs != OPTIONAL: + raise ValueError('nargs must be %r to supply const' % OPTIONAL) + super(_AppendAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=nargs, + const=const, + default=default, + type=type, + choices=choices, + required=required, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, 
option_string=None): + items = _copy.copy(_ensure_value(namespace, self.dest, [])) + items.append(values) + setattr(namespace, self.dest, items) + + +class _AppendConstAction(Action): + + def __init__(self, + option_strings, + dest, + const, + default=None, + required=False, + help=None, + metavar=None): + super(_AppendConstAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + const=const, + default=default, + required=required, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + items = _copy.copy(_ensure_value(namespace, self.dest, [])) + items.append(self.const) + setattr(namespace, self.dest, items) + + +class _CountAction(Action): + + def __init__(self, + option_strings, + dest, + default=None, + required=False, + help=None): + super(_CountAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + new_count = _ensure_value(namespace, self.dest, 0) + 1 + setattr(namespace, self.dest, new_count) + + +class _HelpAction(Action): + + def __init__(self, + option_strings, + dest=SUPPRESS, + default=SUPPRESS, + help=None): + super(_HelpAction, self).__init__( + option_strings=option_strings, + dest=dest, + default=default, + nargs=0, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + parser.print_help() + parser.exit() + + +class _VersionAction(Action): + + def __init__(self, + option_strings, + version=None, + dest=SUPPRESS, + default=SUPPRESS, + help="show program's version number and exit"): + super(_VersionAction, self).__init__( + option_strings=option_strings, + dest=dest, + default=default, + nargs=0, + help=help) + self.version = version + + def __call__(self, parser, namespace, values, option_string=None): + version = self.version + if version is None: + version = parser.version + formatter = parser._get_formatter() + formatter.add_text(version) + parser.exit(message=formatter.format_help()) + + +class _SubParsersAction(Action): + + class _ChoicesPseudoAction(Action): + + def __init__(self, name, help): + sup = super(_SubParsersAction._ChoicesPseudoAction, self) + sup.__init__(option_strings=[], dest=name, help=help) + + def __init__(self, + option_strings, + prog, + parser_class, + dest=SUPPRESS, + help=None, + metavar=None): + + self._prog_prefix = prog + self._parser_class = parser_class + self._name_parser_map = {} + self._choices_actions = [] + + super(_SubParsersAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=PARSER, + choices=self._name_parser_map, + help=help, + metavar=metavar) + + def add_parser(self, name, **kwargs): + # set prog from the existing prefix + if kwargs.get('prog') is None: + kwargs['prog'] = '%s %s' % (self._prog_prefix, name) + + # create a pseudo-action to hold the choice help + if 'help' in kwargs: + help = kwargs.pop('help') + choice_action = self._ChoicesPseudoAction(name, help) + self._choices_actions.append(choice_action) + + # create the parser and add it to the map + parser = self._parser_class(**kwargs) + self._name_parser_map[name] = parser + return parser + + def _get_subactions(self): + return self._choices_actions + + def __call__(self, parser, namespace, values, option_string=None): + parser_name = values[0] + arg_strings = values[1:] + + # set the parser name if requested + if self.dest is not SUPPRESS: + setattr(namespace, self.dest, parser_name) + + # 
select the parser + try: + parser = self._name_parser_map[parser_name] + except KeyError: + tup = parser_name, ', '.join(self._name_parser_map) + msg = _('unknown parser %r (choices: %s)' % tup) + raise ArgumentError(self, msg) + + # parse all the remaining options into the namespace + # store any unrecognized options on the object, so that the top + # level parser can decide what to do with them + namespace, arg_strings = parser.parse_known_args(arg_strings, namespace) + if arg_strings: + vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, []) + getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings) + + +# ============== +# Type classes +# ============== + +class FileType(object): + """Factory for creating file object types + + Instances of FileType are typically passed as type= arguments to the + ArgumentParser add_argument() method. + + Keyword Arguments: + - mode -- A string indicating how the file is to be opened. Accepts the + same values as the builtin open() function. + - bufsize -- The file's desired buffer size. Accepts the same values as + the builtin open() function. + """ + + def __init__(self, mode='r', bufsize=None): + self._mode = mode + self._bufsize = bufsize + + def __call__(self, string): + # the special argument "-" means sys.std{in,out} + if string == '-': + if 'r' in self._mode: + return _sys.stdin + elif 'w' in self._mode: + return _sys.stdout + else: + msg = _('argument "-" with mode %r' % self._mode) + raise ValueError(msg) + + # all other arguments are used as file names + if self._bufsize: + return open(string, self._mode, self._bufsize) + else: + return open(string, self._mode) + + def __repr__(self): + args = [self._mode, self._bufsize] + args_str = ', '.join([repr(arg) for arg in args if arg is not None]) + return '%s(%s)' % (type(self).__name__, args_str) + +# =========================== +# Optional and Positional Parsing +# =========================== + +class Namespace(_AttributeHolder): + """Simple object for storing attributes. + + Implements equality by attribute names and values, and provides a simple + string representation. 
+ """ + + def __init__(self, **kwargs): + for name in kwargs: + setattr(self, name, kwargs[name]) + + __hash__ = None + + def __eq__(self, other): + return vars(self) == vars(other) + + def __ne__(self, other): + return not (self == other) + + def __contains__(self, key): + return key in self.__dict__ + + +class _ActionsContainer(object): + + def __init__(self, + description, + prefix_chars, + argument_default, + conflict_handler): + super(_ActionsContainer, self).__init__() + + self.description = description + self.argument_default = argument_default + self.prefix_chars = prefix_chars + self.conflict_handler = conflict_handler + + # set up registries + self._registries = {} + + # register actions + self.register('action', None, _StoreAction) + self.register('action', 'store', _StoreAction) + self.register('action', 'store_const', _StoreConstAction) + self.register('action', 'store_true', _StoreTrueAction) + self.register('action', 'store_false', _StoreFalseAction) + self.register('action', 'append', _AppendAction) + self.register('action', 'append_const', _AppendConstAction) + self.register('action', 'count', _CountAction) + self.register('action', 'help', _HelpAction) + self.register('action', 'version', _VersionAction) + self.register('action', 'parsers', _SubParsersAction) + + # raise an exception if the conflict handler is invalid + self._get_handler() + + # action storage + self._actions = [] + self._option_string_actions = {} + + # groups + self._action_groups = [] + self._mutually_exclusive_groups = [] + + # defaults storage + self._defaults = {} + + # determines whether an "option" looks like a negative number + self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$') + + # whether or not there are any optionals that look like negative + # numbers -- uses a list so it can be shared and edited + self._has_negative_number_optionals = [] + + # ==================== + # Registration methods + # ==================== + def register(self, registry_name, value, object): + registry = self._registries.setdefault(registry_name, {}) + registry[value] = object + + def _registry_get(self, registry_name, value, default=None): + return self._registries[registry_name].get(value, default) + + # ================================== + # Namespace default accessor methods + # ================================== + def set_defaults(self, **kwargs): + self._defaults.update(kwargs) + + # if these defaults match any existing arguments, replace + # the previous default on the object with the new one + for action in self._actions: + if action.dest in kwargs: + action.default = kwargs[action.dest] + + def get_default(self, dest): + for action in self._actions: + if action.dest == dest and action.default is not None: + return action.default + return self._defaults.get(dest, None) + + + # ======================= + # Adding argument actions + # ======================= + def add_argument(self, *args, **kwargs): + """ + add_argument(dest, ..., name=value, ...) + add_argument(option_string, option_string, ..., name=value, ...) 
+ """ + + # if no positional args are supplied or only one is supplied and + # it doesn't look like an option string, parse a positional + # argument + chars = self.prefix_chars + if not args or len(args) == 1 and args[0][0] not in chars: + if args and 'dest' in kwargs: + raise ValueError('dest supplied twice for positional argument') + kwargs = self._get_positional_kwargs(*args, **kwargs) + + # otherwise, we're adding an optional argument + else: + kwargs = self._get_optional_kwargs(*args, **kwargs) + + # if no default was supplied, use the parser-level default + if 'default' not in kwargs: + dest = kwargs['dest'] + if dest in self._defaults: + kwargs['default'] = self._defaults[dest] + elif self.argument_default is not None: + kwargs['default'] = self.argument_default + + # create the action object, and add it to the parser + action_class = self._pop_action_class(kwargs) + if not _callable(action_class): + raise ValueError('unknown action "%s"' % action_class) + action = action_class(**kwargs) + + # raise an error if the action type is not callable + type_func = self._registry_get('type', action.type, action.type) + if not _callable(type_func): + raise ValueError('%r is not callable' % type_func) + + return self._add_action(action) + + def add_argument_group(self, *args, **kwargs): + group = _ArgumentGroup(self, *args, **kwargs) + self._action_groups.append(group) + return group + + def add_mutually_exclusive_group(self, **kwargs): + group = _MutuallyExclusiveGroup(self, **kwargs) + self._mutually_exclusive_groups.append(group) + return group + + def _add_action(self, action): + # resolve any conflicts + self._check_conflict(action) + + # add to actions list + self._actions.append(action) + action.container = self + + # index the action by any option strings it has + for option_string in action.option_strings: + self._option_string_actions[option_string] = action + + # set the flag if any option strings look like negative numbers + for option_string in action.option_strings: + if self._negative_number_matcher.match(option_string): + if not self._has_negative_number_optionals: + self._has_negative_number_optionals.append(True) + + # return the created action + return action + + def _remove_action(self, action): + self._actions.remove(action) + + def _add_container_actions(self, container): + # collect groups by titles + title_group_map = {} + for group in self._action_groups: + if group.title in title_group_map: + msg = _('cannot merge actions - two groups are named %r') + raise ValueError(msg % (group.title)) + title_group_map[group.title] = group + + # map each action to its group + group_map = {} + for group in container._action_groups: + + # if a group with the title exists, use that, otherwise + # create a new group matching the container's group + if group.title not in title_group_map: + title_group_map[group.title] = self.add_argument_group( + title=group.title, + description=group.description, + conflict_handler=group.conflict_handler) + + # map the actions to their new group + for action in group._group_actions: + group_map[action] = title_group_map[group.title] + + # add container's mutually exclusive groups + # NOTE: if add_mutually_exclusive_group ever gains title= and + # description= then this code will need to be expanded as above + for group in container._mutually_exclusive_groups: + mutex_group = self.add_mutually_exclusive_group( + required=group.required) + + # map the actions to their new mutex group + for action in group._group_actions: + group_map[action] = 
mutex_group + + # add all actions to this container or their group + for action in container._actions: + group_map.get(action, self)._add_action(action) + + def _get_positional_kwargs(self, dest, **kwargs): + # make sure required is not specified + if 'required' in kwargs: + msg = _("'required' is an invalid argument for positionals") + raise TypeError(msg) + + # mark positional arguments as required if at least one is + # always required + if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]: + kwargs['required'] = True + if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs: + kwargs['required'] = True + + # return the keyword arguments with no option strings + return dict(kwargs, dest=dest, option_strings=[]) + + def _get_optional_kwargs(self, *args, **kwargs): + # determine short and long option strings + option_strings = [] + long_option_strings = [] + for option_string in args: + # error on strings that don't start with an appropriate prefix + if not option_string[0] in self.prefix_chars: + msg = _('invalid option string %r: ' + 'must start with a character %r') + tup = option_string, self.prefix_chars + raise ValueError(msg % tup) + + # strings starting with two prefix characters are long options + option_strings.append(option_string) + if option_string[0] in self.prefix_chars: + if len(option_string) > 1: + if option_string[1] in self.prefix_chars: + long_option_strings.append(option_string) + + # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x' + dest = kwargs.pop('dest', None) + if dest is None: + if long_option_strings: + dest_option_string = long_option_strings[0] + else: + dest_option_string = option_strings[0] + dest = dest_option_string.lstrip(self.prefix_chars) + if not dest: + msg = _('dest= is required for options like %r') + raise ValueError(msg % option_string) + dest = dest.replace('-', '_') + + # return the updated keyword arguments + return dict(kwargs, dest=dest, option_strings=option_strings) + + def _pop_action_class(self, kwargs, default=None): + action = kwargs.pop('action', default) + return self._registry_get('action', action, action) + + def _get_handler(self): + # determine function from conflict handler string + handler_func_name = '_handle_conflict_%s' % self.conflict_handler + try: + return getattr(self, handler_func_name) + except AttributeError: + msg = _('invalid conflict_resolution value: %r') + raise ValueError(msg % self.conflict_handler) + + def _check_conflict(self, action): + + # find all options that conflict with this option + confl_optionals = [] + for option_string in action.option_strings: + if option_string in self._option_string_actions: + confl_optional = self._option_string_actions[option_string] + confl_optionals.append((option_string, confl_optional)) + + # resolve any conflicts + if confl_optionals: + conflict_handler = self._get_handler() + conflict_handler(action, confl_optionals) + + def _handle_conflict_error(self, action, conflicting_actions): + message = _('conflicting option string(s): %s') + conflict_string = ', '.join([option_string + for option_string, action + in conflicting_actions]) + raise ArgumentError(action, message % conflict_string) + + def _handle_conflict_resolve(self, action, conflicting_actions): + + # remove all conflicting options + for option_string, action in conflicting_actions: + + # remove the conflicting option + action.option_strings.remove(option_string) + self._option_string_actions.pop(option_string, None) + + # if the option now has no option string, remove it from the + # 
container holding it + if not action.option_strings: + action.container._remove_action(action) + + +class _ArgumentGroup(_ActionsContainer): + + def __init__(self, container, title=None, description=None, **kwargs): + # add any missing keyword arguments by checking the container + update = kwargs.setdefault + update('conflict_handler', container.conflict_handler) + update('prefix_chars', container.prefix_chars) + update('argument_default', container.argument_default) + super_init = super(_ArgumentGroup, self).__init__ + super_init(description=description, **kwargs) + + # group attributes + self.title = title + self._group_actions = [] + + # share most attributes with the container + self._registries = container._registries + self._actions = container._actions + self._option_string_actions = container._option_string_actions + self._defaults = container._defaults + self._has_negative_number_optionals = \ + container._has_negative_number_optionals + + def _add_action(self, action): + action = super(_ArgumentGroup, self)._add_action(action) + self._group_actions.append(action) + return action + + def _remove_action(self, action): + super(_ArgumentGroup, self)._remove_action(action) + self._group_actions.remove(action) + + +class _MutuallyExclusiveGroup(_ArgumentGroup): + + def __init__(self, container, required=False): + super(_MutuallyExclusiveGroup, self).__init__(container) + self.required = required + self._container = container + + def _add_action(self, action): + if action.required: + msg = _('mutually exclusive arguments must be optional') + raise ValueError(msg) + action = self._container._add_action(action) + self._group_actions.append(action) + return action + + def _remove_action(self, action): + self._container._remove_action(action) + self._group_actions.remove(action) + + +class ArgumentParser(_AttributeHolder, _ActionsContainer): + """Object for parsing command line strings into Python objects. + + Keyword Arguments: + - prog -- The name of the program (default: sys.argv[0]) + - usage -- A usage message (default: auto-generated from arguments) + - description -- A description of what the program does + - epilog -- Text following the argument descriptions + - parents -- Parsers whose arguments should be copied into this one + - formatter_class -- HelpFormatter class for printing help messages + - prefix_chars -- Characters that prefix optional arguments + - fromfile_prefix_chars -- Characters that prefix files containing + additional arguments + - argument_default -- The default value for all arguments + - conflict_handler -- String indicating how to handle conflicts + - add_help -- Add a -h/-help option + """ + + def __init__(self, + prog=None, + usage=None, + description=None, + epilog=None, + version=None, + parents=[], + formatter_class=HelpFormatter, + prefix_chars='-', + fromfile_prefix_chars=None, + argument_default=None, + conflict_handler='error', + add_help=True): + + if version is not None: + import warnings + warnings.warn( + """The "version" argument to ArgumentParser is deprecated. 
""" + """Please use """ + """"add_argument(..., action='version', version="N", ...)" """ + """instead""", DeprecationWarning) + + superinit = super(ArgumentParser, self).__init__ + superinit(description=description, + prefix_chars=prefix_chars, + argument_default=argument_default, + conflict_handler=conflict_handler) + + # default setting for prog + if prog is None: + prog = _os.path.basename(_sys.argv[0]) + + self.prog = prog + self.usage = usage + self.epilog = epilog + self.version = version + self.formatter_class = formatter_class + self.fromfile_prefix_chars = fromfile_prefix_chars + self.add_help = add_help + + add_group = self.add_argument_group + self._positionals = add_group(_('positional arguments')) + self._optionals = add_group(_('optional arguments')) + self._subparsers = None + + # register types + def identity(string): + return string + self.register('type', None, identity) + + # add help and version arguments if necessary + # (using explicit default to override global argument_default) + if '-' in prefix_chars: + default_prefix = '-' + else: + default_prefix = prefix_chars[0] + if self.add_help: + self.add_argument( + default_prefix+'h', default_prefix*2+'help', + action='help', default=SUPPRESS, + help=_('show this help message and exit')) + if self.version: + self.add_argument( + default_prefix+'v', default_prefix*2+'version', + action='version', default=SUPPRESS, + version=self.version, + help=_("show program's version number and exit")) + + # add parent arguments and defaults + for parent in parents: + self._add_container_actions(parent) + try: + defaults = parent._defaults + except AttributeError: + pass + else: + self._defaults.update(defaults) + + # ======================= + # Pretty __repr__ methods + # ======================= + def _get_kwargs(self): + names = [ + 'prog', + 'usage', + 'description', + 'version', + 'formatter_class', + 'conflict_handler', + 'add_help', + ] + return [(name, getattr(self, name)) for name in names] + + # ================================== + # Optional/Positional adding methods + # ================================== + def add_subparsers(self, **kwargs): + if self._subparsers is not None: + self.error(_('cannot have multiple subparser arguments')) + + # add the parser class to the arguments if it's not present + kwargs.setdefault('parser_class', type(self)) + + if 'title' in kwargs or 'description' in kwargs: + title = _(kwargs.pop('title', 'subcommands')) + description = _(kwargs.pop('description', None)) + self._subparsers = self.add_argument_group(title, description) + else: + self._subparsers = self._positionals + + # prog defaults to the usage message of this parser, skipping + # optional arguments and with no "usage:" prefix + if kwargs.get('prog') is None: + formatter = self._get_formatter() + positionals = self._get_positional_actions() + groups = self._mutually_exclusive_groups + formatter.add_usage(self.usage, positionals, groups, '') + kwargs['prog'] = formatter.format_help().strip() + + # create the parsers action and add it to the positionals list + parsers_class = self._pop_action_class(kwargs, 'parsers') + action = parsers_class(option_strings=[], **kwargs) + self._subparsers._add_action(action) + + # return the created parsers action + return action + + def _add_action(self, action): + if action.option_strings: + self._optionals._add_action(action) + else: + self._positionals._add_action(action) + return action + + def _get_optional_actions(self): + return [action + for action in self._actions + if action.option_strings] 
+ + def _get_positional_actions(self): + return [action + for action in self._actions + if not action.option_strings] + + # ===================================== + # Command line argument parsing methods + # ===================================== + def parse_args(self, args=None, namespace=None): + args, argv = self.parse_known_args(args, namespace) + if argv: + msg = _('unrecognized arguments: %s') + self.error(msg % ' '.join(argv)) + return args + + def parse_known_args(self, args=None, namespace=None): + # args default to the system args + if args is None: + args = _sys.argv[1:] + + # default Namespace built from parser defaults + if namespace is None: + namespace = Namespace() + + # add any action defaults that aren't present + for action in self._actions: + if action.dest is not SUPPRESS: + if not hasattr(namespace, action.dest): + if action.default is not SUPPRESS: + default = action.default + if isinstance(action.default, basestring): + default = self._get_value(action, default) + setattr(namespace, action.dest, default) + + # add any parser defaults that aren't present + for dest in self._defaults: + if not hasattr(namespace, dest): + setattr(namespace, dest, self._defaults[dest]) + + # parse the arguments and exit if there are any errors + try: + namespace, args = self._parse_known_args(args, namespace) + if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR): + args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR)) + delattr(namespace, _UNRECOGNIZED_ARGS_ATTR) + return namespace, args + except ArgumentError: + err = _sys.exc_info()[1] + self.error(str(err)) + + def _parse_known_args(self, arg_strings, namespace): + # replace arg strings that are file references + if self.fromfile_prefix_chars is not None: + arg_strings = self._read_args_from_files(arg_strings) + + # map all mutually exclusive arguments to the other arguments + # they can't occur with + action_conflicts = {} + for mutex_group in self._mutually_exclusive_groups: + group_actions = mutex_group._group_actions + for i, mutex_action in enumerate(mutex_group._group_actions): + conflicts = action_conflicts.setdefault(mutex_action, []) + conflicts.extend(group_actions[:i]) + conflicts.extend(group_actions[i + 1:]) + + # find all option indices, and determine the arg_string_pattern + # which has an 'O' if there is an option at an index, + # an 'A' if there is an argument, or a '-' if there is a '--' + option_string_indices = {} + arg_string_pattern_parts = [] + arg_strings_iter = iter(arg_strings) + for i, arg_string in enumerate(arg_strings_iter): + + # all args after -- are non-options + if arg_string == '--': + arg_string_pattern_parts.append('-') + for arg_string in arg_strings_iter: + arg_string_pattern_parts.append('A') + + # otherwise, add the arg to the arg strings + # and note the index if it was an option + else: + option_tuple = self._parse_optional(arg_string) + if option_tuple is None: + pattern = 'A' + else: + option_string_indices[i] = option_tuple + pattern = 'O' + arg_string_pattern_parts.append(pattern) + + # join the pieces together to form the pattern + arg_strings_pattern = ''.join(arg_string_pattern_parts) + + # converts arg strings to the appropriate type and then takes the action + seen_actions = set() + seen_non_default_actions = set() + + def take_action(action, argument_strings, option_string=None): + seen_actions.add(action) + argument_values = self._get_values(action, argument_strings) + + # error if this argument is not allowed with other previously + # seen arguments, assuming that actions that use the default + # value don't really count as "present" + if argument_values is not action.default: + seen_non_default_actions.add(action) + for conflict_action in action_conflicts.get(action, []): + if conflict_action in seen_non_default_actions: + msg = _('not allowed with argument %s') + action_name = _get_action_name(conflict_action) + raise ArgumentError(action, msg % action_name) + + # take the action if we didn't receive a SUPPRESS value + # (e.g. from a default) + if argument_values is not SUPPRESS: + action(self, namespace, argument_values, option_string) + + # function to convert arg_strings into an optional action + def consume_optional(start_index): + + # get the optional identified at this index + option_tuple = option_string_indices[start_index] + action, option_string, explicit_arg = option_tuple + + # identify additional optionals in the same arg string + # (e.g. -xyz is the same as -x -y -z if no args are required) + match_argument = self._match_argument + action_tuples = [] + while True: + + # if we found no optional action, skip it + if action is None: + extras.append(arg_strings[start_index]) + return start_index + 1 + + # if there is an explicit argument, try to match the + # optional's string arguments to only this + if explicit_arg is not None: + arg_count = match_argument(action, 'A') + + # if the action is a single-dash option and takes no + # arguments, try to parse more single-dash options out + # of the tail of the option string + chars = self.prefix_chars + if arg_count == 0 and option_string[1] not in chars: + action_tuples.append((action, [], option_string)) + char = option_string[0] + option_string = char + explicit_arg[0] + new_explicit_arg = explicit_arg[1:] or None + optionals_map = self._option_string_actions + if option_string in optionals_map: + action = optionals_map[option_string] + explicit_arg = new_explicit_arg + else: + msg = _('ignored explicit argument %r') + raise ArgumentError(action, msg % explicit_arg) + + # if the action expects exactly one argument, we've + # successfully matched the option; exit the loop + elif arg_count == 1: + stop = start_index + 1 + args = [explicit_arg] + action_tuples.append((action, args, option_string)) + break + + # error if a double-dash option did not use the + # explicit argument + else: + msg = _('ignored explicit argument %r') + raise ArgumentError(action, msg % explicit_arg) + + # if there is no explicit argument, try to match the + # optional's string arguments with the following strings + # if successful, exit the loop + else: + start = start_index + 1 + selected_patterns = arg_strings_pattern[start:] + arg_count = match_argument(action, selected_patterns) + stop = start + arg_count + args = arg_strings[start:stop] + action_tuples.append((action, args, option_string)) + break + + # add the Optional to the list and return the index at which + # the Optional's string args stopped + assert action_tuples + for action, args, option_string in action_tuples: + take_action(action, args, option_string) + return stop + + # the list of Positionals left to be parsed; this is modified + # by consume_positionals() + positionals = self._get_positional_actions() + + # function to convert arg_strings into positional actions + def consume_positionals(start_index): + # match as many Positionals as possible + match_partial = self._match_arguments_partial + selected_pattern = arg_strings_pattern[start_index:] + arg_counts = match_partial(positionals, selected_pattern) + + # slice off the appropriate arg strings for each Positional
+ # and add the Positional and its args to the list + for action, arg_count in zip(positionals, arg_counts): + args = arg_strings[start_index: start_index + arg_count] + start_index += arg_count + take_action(action, args) + + # slice off the Positionals that we just parsed and return the + # index at which the Positionals' string args stopped + positionals[:] = positionals[len(arg_counts):] + return start_index + + # consume Positionals and Optionals alternately, until we have + # passed the last option string + extras = [] + start_index = 0 + if option_string_indices: + max_option_string_index = max(option_string_indices) + else: + max_option_string_index = -1 + while start_index <= max_option_string_index: + + # consume any Positionals preceding the next option + next_option_string_index = min([ + index + for index in option_string_indices + if index >= start_index]) + if start_index != next_option_string_index: + positionals_end_index = consume_positionals(start_index) + + # only try to parse the next optional if we didn't consume + # the option string during the positionals parsing + if positionals_end_index > start_index: + start_index = positionals_end_index + continue + else: + start_index = positionals_end_index + + # if we consumed all the positionals we could and we're not + # at the index of an option string, there were extra arguments + if start_index not in option_string_indices: + strings = arg_strings[start_index:next_option_string_index] + extras.extend(strings) + start_index = next_option_string_index + + # consume the next optional and any arguments for it + start_index = consume_optional(start_index) + + # consume any positionals following the last Optional + stop_index = consume_positionals(start_index) + + # if we didn't consume all the argument strings, there were extras + extras.extend(arg_strings[stop_index:]) + + # if we didn't use all the Positional objects, there were too few + # arg strings supplied. 
+ if positionals: + self.error(_('too few arguments')) + + # make sure all required actions were present + for action in self._actions: + if action.required: + if action not in seen_actions: + name = _get_action_name(action) + self.error(_('argument %s is required') % name) + + # make sure all required groups had one option present + for group in self._mutually_exclusive_groups: + if group.required: + for action in group._group_actions: + if action in seen_non_default_actions: + break + + # if no actions were used, report the error + else: + names = [_get_action_name(action) + for action in group._group_actions + if action.help is not SUPPRESS] + msg = _('one of the arguments %s is required') + self.error(msg % ' '.join(names)) + + # return the updated namespace and the extra arguments + return namespace, extras + + def _read_args_from_files(self, arg_strings): + # expand arguments referencing files + new_arg_strings = [] + for arg_string in arg_strings: + + # for regular arguments, just add them back into the list + if arg_string[0] not in self.fromfile_prefix_chars: + new_arg_strings.append(arg_string) + + # replace arguments referencing files with the file content + else: + try: + args_file = open(arg_string[1:]) + try: + arg_strings = [] + for arg_line in args_file.read().splitlines(): + for arg in self.convert_arg_line_to_args(arg_line): + arg_strings.append(arg) + arg_strings = self._read_args_from_files(arg_strings) + new_arg_strings.extend(arg_strings) + finally: + args_file.close() + except IOError: + err = _sys.exc_info()[1] + self.error(str(err)) + + # return the modified argument list + return new_arg_strings + + def convert_arg_line_to_args(self, arg_line): + return [arg_line] + + def _match_argument(self, action, arg_strings_pattern): + # match the pattern for this action to the arg strings + nargs_pattern = self._get_nargs_pattern(action) + match = _re.match(nargs_pattern, arg_strings_pattern) + + # raise an exception if we weren't able to find a match + if match is None: + nargs_errors = { + None: _('expected one argument'), + OPTIONAL: _('expected at most one argument'), + ONE_OR_MORE: _('expected at least one argument'), + } + default = _('expected %s argument(s)') % action.nargs + msg = nargs_errors.get(action.nargs, default) + raise ArgumentError(action, msg) + + # return the number of arguments matched + return len(match.group(1)) + + def _match_arguments_partial(self, actions, arg_strings_pattern): + # progressively shorten the actions list by slicing off the + # final actions until we find a match + result = [] + for i in range(len(actions), 0, -1): + actions_slice = actions[:i] + pattern = ''.join([self._get_nargs_pattern(action) + for action in actions_slice]) + match = _re.match(pattern, arg_strings_pattern) + if match is not None: + result.extend([len(string) for string in match.groups()]) + break + + # return the list of arg string counts + return result + + def _parse_optional(self, arg_string): + # if it's an empty string, it was meant to be a positional + if not arg_string: + return None + + # if it doesn't start with a prefix, it was meant to be positional + if not arg_string[0] in self.prefix_chars: + return None + + # if the option string is present in the parser, return the action + if arg_string in self._option_string_actions: + action = self._option_string_actions[arg_string] + return action, arg_string, None + + # if it's just a single character, it was meant to be positional + if len(arg_string) == 1: + return None + + # if the option string before 
the "=" is present, return the action + if '=' in arg_string: + option_string, explicit_arg = arg_string.split('=', 1) + if option_string in self._option_string_actions: + action = self._option_string_actions[option_string] + return action, option_string, explicit_arg + + # search through all possible prefixes of the option string + # and all actions in the parser for possible interpretations + option_tuples = self._get_option_tuples(arg_string) + + # if multiple actions match, the option string was ambiguous + if len(option_tuples) > 1: + options = ', '.join([option_string + for action, option_string, explicit_arg in option_tuples]) + tup = arg_string, options + self.error(_('ambiguous option: %s could match %s') % tup) + + # if exactly one action matched, this segmentation is good, + # so return the parsed action + elif len(option_tuples) == 1: + option_tuple, = option_tuples + return option_tuple + + # if it was not found as an option, but it looks like a negative + # number, it was meant to be positional + # unless there are negative-number-like options + if self._negative_number_matcher.match(arg_string): + if not self._has_negative_number_optionals: + return None + + # if it contains a space, it was meant to be a positional + if ' ' in arg_string: + return None + + # it was meant to be an optional but there is no such option + # in this parser (though it might be a valid option in a subparser) + return None, arg_string, None + + def _get_option_tuples(self, option_string): + result = [] + + # option strings starting with two prefix characters are only + # split at the '=' + chars = self.prefix_chars + if option_string[0] in chars and option_string[1] in chars: + if '=' in option_string: + option_prefix, explicit_arg = option_string.split('=', 1) + else: + option_prefix = option_string + explicit_arg = None + for option_string in self._option_string_actions: + if option_string.startswith(option_prefix): + action = self._option_string_actions[option_string] + tup = action, option_string, explicit_arg + result.append(tup) + + # single character options can be concatenated with their arguments + # but multiple character options always have to have their argument + # separate + elif option_string[0] in chars and option_string[1] not in chars: + option_prefix = option_string + explicit_arg = None + short_option_prefix = option_string[:2] + short_explicit_arg = option_string[2:] + + for option_string in self._option_string_actions: + if option_string == short_option_prefix: + action = self._option_string_actions[option_string] + tup = action, option_string, short_explicit_arg + result.append(tup) + elif option_string.startswith(option_prefix): + action = self._option_string_actions[option_string] + tup = action, option_string, explicit_arg + result.append(tup) + + # shouldn't ever get here + else: + self.error(_('unexpected option string: %s') % option_string) + + # return the collected option tuples + return result + + def _get_nargs_pattern(self, action): + # in all examples below, we have to allow for '--' args + # which are represented as '-' in the pattern + nargs = action.nargs + + # the default (None) is assumed to be a single argument + if nargs is None: + nargs_pattern = '(-*A-*)' + + # allow zero or one arguments + elif nargs == OPTIONAL: + nargs_pattern = '(-*A?-*)' + + # allow zero or more arguments + elif nargs == ZERO_OR_MORE: + nargs_pattern = '(-*[A-]*)' + + # allow one or more arguments + elif nargs == ONE_OR_MORE: + nargs_pattern = '(-*A[A-]*)' + + # allow any number of 
options or arguments + elif nargs == REMAINDER: + nargs_pattern = '([-AO]*)' + + # allow one argument followed by any number of options or arguments + elif nargs == PARSER: + nargs_pattern = '(-*A[-AO]*)' + + # all others should be integers + else: + nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) + + # if this is an optional action, -- is not allowed + if action.option_strings: + nargs_pattern = nargs_pattern.replace('-*', '') + nargs_pattern = nargs_pattern.replace('-', '') + + # return the pattern + return nargs_pattern + + # ======================== + # Value conversion methods + # ======================== + def _get_values(self, action, arg_strings): + # for everything but PARSER args, strip out '--' + if action.nargs not in [PARSER, REMAINDER]: + arg_strings = [s for s in arg_strings if s != '--'] + + # optional argument produces a default when not present + if not arg_strings and action.nargs == OPTIONAL: + if action.option_strings: + value = action.const + else: + value = action.default + if isinstance(value, basestring): + value = self._get_value(action, value) + self._check_value(action, value) + + # when nargs='*' on a positional, if there were no command-line + # args, use the default if it is anything other than None + elif (not arg_strings and action.nargs == ZERO_OR_MORE and + not action.option_strings): + if action.default is not None: + value = action.default + else: + value = arg_strings + self._check_value(action, value) + + # single argument or optional argument produces a single value + elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: + arg_string, = arg_strings + value = self._get_value(action, arg_string) + self._check_value(action, value) + + # REMAINDER arguments convert all values, checking none + elif action.nargs == REMAINDER: + value = [self._get_value(action, v) for v in arg_strings] + + # PARSER arguments convert all values, but check only the first + elif action.nargs == PARSER: + value = [self._get_value(action, v) for v in arg_strings] + self._check_value(action, value[0]) + + # all other types of nargs produce a list + else: + value = [self._get_value(action, v) for v in arg_strings] + for v in value: + self._check_value(action, v) + + # return the converted value + return value + + def _get_value(self, action, arg_string): + type_func = self._registry_get('type', action.type, action.type) + if not _callable(type_func): + msg = _('%r is not callable') + raise ArgumentError(action, msg % type_func) + + # convert the value to the appropriate type + try: + result = type_func(arg_string) + + # ArgumentTypeErrors indicate errors + except ArgumentTypeError: + name = getattr(action.type, '__name__', repr(action.type)) + msg = str(_sys.exc_info()[1]) + raise ArgumentError(action, msg) + + # TypeErrors or ValueErrors also indicate errors + except (TypeError, ValueError): + name = getattr(action.type, '__name__', repr(action.type)) + msg = _('invalid %s value: %r') + raise ArgumentError(action, msg % (name, arg_string)) + + # return the converted value + return result + + def _check_value(self, action, value): + # converted value must be one of the choices (if specified) + if action.choices is not None and value not in action.choices: + tup = value, ', '.join(map(repr, action.choices)) + msg = _('invalid choice: %r (choose from %s)') % tup + raise ArgumentError(action, msg) + + # ======================= + # Help-formatting methods + # ======================= + def format_usage(self): + formatter = self._get_formatter() + 
formatter.add_usage(self.usage, self._actions, + self._mutually_exclusive_groups) + return formatter.format_help() + + def format_help(self): + formatter = self._get_formatter() + + # usage + formatter.add_usage(self.usage, self._actions, + self._mutually_exclusive_groups) + + # description + formatter.add_text(self.description) + + # positionals, optionals and user-defined groups + for action_group in self._action_groups: + formatter.start_section(action_group.title) + formatter.add_text(action_group.description) + formatter.add_arguments(action_group._group_actions) + formatter.end_section() + + # epilog + formatter.add_text(self.epilog) + + # determine help from format above + return formatter.format_help() + + def format_version(self): + import warnings + warnings.warn( + 'The format_version method is deprecated -- the "version" ' + 'argument to ArgumentParser is no longer supported.', + DeprecationWarning) + formatter = self._get_formatter() + formatter.add_text(self.version) + return formatter.format_help() + + def _get_formatter(self): + return self.formatter_class(prog=self.prog) + + # ===================== + # Help-printing methods + # ===================== + def print_usage(self, file=None): + if file is None: + file = _sys.stdout + self._print_message(self.format_usage(), file) + + def print_help(self, file=None): + if file is None: + file = _sys.stdout + self._print_message(self.format_help(), file) + + def print_version(self, file=None): + import warnings + warnings.warn( + 'The print_version method is deprecated -- the "version" ' + 'argument to ArgumentParser is no longer supported.', + DeprecationWarning) + self._print_message(self.format_version(), file) + + def _print_message(self, message, file=None): + if message: + if file is None: + file = _sys.stderr + file.write(message) + + # =============== + # Exiting methods + # =============== + def exit(self, status=0, message=None): + if message: + self._print_message(message, _sys.stderr) + _sys.exit(status) + + def error(self, message): + """error(message: string) + + Prints a usage message incorporating the message to stderr and + exits. + + If you override this in a subclass, it should not return -- it + should either exit or raise an exception. + """ + self.print_usage(_sys.stderr) + self.exit(2, _('%s: error: %s\n') % (self.prog, message)) diff --git a/venv/lib/python3.6/site-packages/gunicorn/config.py b/venv/lib/python3.6/site-packages/gunicorn/config.py new file mode 100644 index 0000000..aa97894 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/config.py @@ -0,0 +1,1950 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +# Please remember to run "make -C docs html" after update "desc" attributes. + +import copy +import grp +import inspect +try: + import argparse +except ImportError: # python 2.6 + from . 
import argparse_compat as argparse +import os +import pwd +import re +import ssl +import sys +import textwrap +import shlex + +from gunicorn import __version__ +from gunicorn import _compat +from gunicorn.errors import ConfigError +from gunicorn.reloader import reloader_engines +from gunicorn import six +from gunicorn import util + +KNOWN_SETTINGS = [] +PLATFORM = sys.platform + + +def make_settings(ignore=None): + settings = {} + ignore = ignore or () + for s in KNOWN_SETTINGS: + setting = s() + if setting.name in ignore: + continue + settings[setting.name] = setting.copy() + return settings + + +def auto_int(_, x): + # for compatible with octal numbers in python3 + if re.match(r'0(\d)', x, re.IGNORECASE): + x = x.replace('0', '0o', 1) + return int(x, 0) + + +class Config(object): + + def __init__(self, usage=None, prog=None): + self.settings = make_settings() + self.usage = usage + self.prog = prog or os.path.basename(sys.argv[0]) + self.env_orig = os.environ.copy() + + def __getattr__(self, name): + if name not in self.settings: + raise AttributeError("No configuration setting for: %s" % name) + return self.settings[name].get() + + def __setattr__(self, name, value): + if name != "settings" and name in self.settings: + raise AttributeError("Invalid access!") + super(Config, self).__setattr__(name, value) + + def set(self, name, value): + if name not in self.settings: + raise AttributeError("No configuration setting for: %s" % name) + self.settings[name].set(value) + + def get_cmd_args_from_env(self): + if 'GUNICORN_CMD_ARGS' in self.env_orig: + return shlex.split(self.env_orig['GUNICORN_CMD_ARGS']) + return [] + + def parser(self): + kwargs = { + "usage": self.usage, + "prog": self.prog + } + parser = argparse.ArgumentParser(**kwargs) + parser.add_argument("-v", "--version", + action="version", default=argparse.SUPPRESS, + version="%(prog)s (version " + __version__ + ")\n", + help="show program's version number and exit") + parser.add_argument("args", nargs="*", help=argparse.SUPPRESS) + + keys = sorted(self.settings, key=self.settings.__getitem__) + for k in keys: + self.settings[k].add_option(parser) + + return parser + + @property + def worker_class_str(self): + uri = self.settings['worker_class'].get() + + ## are we using a threaded worker? + is_sync = uri.endswith('SyncWorker') or uri == 'sync' + if is_sync and self.threads > 1: + return "threads" + return uri + + @property + def worker_class(self): + uri = self.settings['worker_class'].get() + + ## are we using a threaded worker? 
+ is_sync = uri.endswith('SyncWorker') or uri == 'sync' + if is_sync and self.threads > 1: + uri = "gunicorn.workers.gthread.ThreadWorker" + + worker_class = util.load_class(uri) + if hasattr(worker_class, "setup"): + worker_class.setup() + return worker_class + + @property + def address(self): + s = self.settings['bind'].get() + return [util.parse_address(_compat.bytes_to_str(bind)) for bind in s] + + @property + def uid(self): + return self.settings['user'].get() + + @property + def gid(self): + return self.settings['group'].get() + + @property + def proc_name(self): + pn = self.settings['proc_name'].get() + if pn is not None: + return pn + else: + return self.settings['default_proc_name'].get() + + @property + def logger_class(self): + uri = self.settings['logger_class'].get() + if uri == "simple": + # support the default + uri = LoggerClass.default + + # if the default logger is in use, and statsd is on, automagically switch + # to the statsd logger + if uri == LoggerClass.default: + if 'statsd_host' in self.settings and self.settings['statsd_host'].value is not None: + uri = "gunicorn.instrument.statsd.Statsd" + + logger_class = util.load_class( + uri, + default="gunicorn.glogging.Logger", + section="gunicorn.loggers") + + if hasattr(logger_class, "install"): + logger_class.install() + return logger_class + + @property + def is_ssl(self): + return self.certfile or self.keyfile + + @property + def ssl_options(self): + opts = {} + for name, value in self.settings.items(): + if value.section == 'SSL': + opts[name] = value.get() + return opts + + @property + def env(self): + raw_env = self.settings['raw_env'].get() + env = {} + + if not raw_env: + return env + + for e in raw_env: + s = _compat.bytes_to_str(e) + try: + k, v = s.split('=', 1) + except ValueError: + raise RuntimeError("environment setting %r invalid" % s) + + env[k] = v + + return env + + @property + def sendfile(self): + if self.settings['sendfile'].get() is not None: + return False + + if 'SENDFILE' in os.environ: + sendfile = os.environ['SENDFILE'].lower() + return sendfile in ['y', '1', 'yes', 'true'] + + return True + + @property + def reuse_port(self): + return self.settings['reuse_port'].get() + + @property + def paste_global_conf(self): + raw_global_conf = self.settings['raw_paste_global_conf'].get() + if raw_global_conf is None: + return None + + global_conf = {} + for e in raw_global_conf: + s = _compat.bytes_to_str(e) + try: + k, v = re.split(r'(?<!\\)=', s, 1) + except ValueError: + raise RuntimeError("environment setting %r invalid" % s) + k = k.replace('\\=', '=') + v = v.replace('\\=', '=') + global_conf[k] = v + + return global_conf + + +class WorkerClass(Setting): + name = "worker_class" + section = "Worker Processes" + cli = ["-k", "--worker-class"] + meta = "STRING" + validator = validate_class + default = "sync" + desc = """\ + The type of workers to use. + + The default class (``sync``) should handle most "normal" types of + workloads. You'll want to read :doc:`design` for information on when + you might want to choose one of the other worker classes. Required + libraries may be installed using setuptools' ``extras_require`` feature. + + A string referring to one of the following bundled classes: + + * ``sync`` + * ``eventlet`` - Requires eventlet >= 0.9.7 (or install it via + ``pip install gunicorn[eventlet]``) + * ``gevent`` - Requires gevent >= 0.13 (or install it via + ``pip install gunicorn[gevent]``) + * ``tornado`` - Requires tornado >= 0.2 (or install it via + ``pip install gunicorn[tornado]``) + * ``gthread`` - Python 2 requires the futures package to be installed + (or install it via ``pip install gunicorn[gthread]``) + * ``gaiohttp`` - Deprecated. + + Optionally, you can provide your own worker by giving Gunicorn a + Python path to a subclass of ``gunicorn.workers.base.Worker``. + This alternative syntax will load the gevent class: + ``gunicorn.workers.ggevent.GeventWorker``. + + .. deprecated:: 19.8 + The ``gaiohttp`` worker is deprecated. Please use + ``aiohttp.worker.GunicornWebWorker`` instead. See + :ref:`asyncio-workers` for more information on how to use it. + """
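The two ``worker_class`` properties above encode a selection rule worth calling out: requesting the ``sync`` worker while setting ``threads`` above 1 silently promotes the worker class to ``gthread``. A minimal restatement of that rule (the helper name is hypothetical, not gunicorn API)::

    def effective_worker_uri(uri, threads):
        # mirrors Config.worker_class above: sync + threads > 1 -> threaded worker
        is_sync = uri.endswith('SyncWorker') or uri == 'sync'
        if is_sync and threads > 1:
            return "gunicorn.workers.gthread.ThreadWorker"
        return uri

    assert effective_worker_uri("sync", 1) == "sync"
    assert effective_worker_uri("sync", 4) == "gunicorn.workers.gthread.ThreadWorker"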
+ """ + +class WorkerThreads(Setting): + name = "threads" + section = "Worker Processes" + cli = ["--threads"] + meta = "INT" + validator = validate_pos_int + type = int + default = 1 + desc = """\ + The number of worker threads for handling requests. + + Run each worker with the specified number of threads. + + A positive integer generally in the ``2-4 x $(NUM_CORES)`` range. + You'll want to vary this a bit to find the best for your particular + application's work load. + + If it is not defined, the default is ``1``. + + This setting only affects the Gthread worker type. + + .. note:: + If you try to use the ``sync`` worker type and set the ``threads`` + setting to more than 1, the ``gthread`` worker type will be used + instead. + """ + + +class WorkerConnections(Setting): + name = "worker_connections" + section = "Worker Processes" + cli = ["--worker-connections"] + meta = "INT" + validator = validate_pos_int + type = int + default = 1000 + desc = """\ + The maximum number of simultaneous clients. + + This setting only affects the Eventlet and Gevent worker types. + """ + + +class MaxRequests(Setting): + name = "max_requests" + section = "Worker Processes" + cli = ["--max-requests"] + meta = "INT" + validator = validate_pos_int + type = int + default = 0 + desc = """\ + The maximum number of requests a worker will process before restarting. + + Any value greater than zero will limit the number of requests a work + will process before automatically restarting. This is a simple method + to help limit the damage of memory leaks. + + If this is set to zero (the default) then the automatic worker + restarts are disabled. + """ + + +class MaxRequestsJitter(Setting): + name = "max_requests_jitter" + section = "Worker Processes" + cli = ["--max-requests-jitter"] + meta = "INT" + validator = validate_pos_int + type = int + default = 0 + desc = """\ + The maximum jitter to add to the *max_requests* setting. + + The jitter causes the restart per worker to be randomized by + ``randint(0, max_requests_jitter)``. This is intended to stagger worker + restarts to avoid all workers restarting at the same time. + + .. versionadded:: 19.2 + """ + + +class Timeout(Setting): + name = "timeout" + section = "Worker Processes" + cli = ["-t", "--timeout"] + meta = "INT" + validator = validate_pos_int + type = int + default = 30 + desc = """\ + Workers silent for more than this many seconds are killed and restarted. + + Generally set to thirty seconds. Only set this noticeably higher if + you're sure of the repercussions for sync workers. For the non sync + workers it just means that the worker process is still communicating and + is not tied to the length of time required to handle a single request. + """ + + +class GracefulTimeout(Setting): + name = "graceful_timeout" + section = "Worker Processes" + cli = ["--graceful-timeout"] + meta = "INT" + validator = validate_pos_int + type = int + default = 30 + desc = """\ + Timeout for graceful workers restart. + + After receiving a restart signal, workers have this much time to finish + serving requests. Workers still alive after the timeout (starting from + the receipt of the restart signal) are force killed. + """ + + +class Keepalive(Setting): + name = "keepalive" + section = "Worker Processes" + cli = ["--keep-alive"] + meta = "INT" + validator = validate_pos_int + type = int + default = 2 + desc = """\ + The number of seconds to wait for requests on a Keep-Alive connection. 
+ + +class LimitRequestLine(Setting): + name = "limit_request_line" + section = "Security" + cli = ["--limit-request-line"] + meta = "INT" + validator = validate_pos_int + type = int + default = 4094 + desc = """\ + The maximum size of HTTP request line in bytes. + + This parameter is used to limit the allowed size of a client's + HTTP request-line. Since the request-line consists of the HTTP + method, URI, and protocol version, this directive places a + restriction on the length of a request-URI allowed for a request + on the server. A server needs this value to be large enough to + hold any of its resource names, including any information that + might be passed in the query part of a GET request. Value is a number + from 0 (unlimited) to 8190. + + This parameter can be used to help prevent DDoS attacks. + """ + + +class LimitRequestFields(Setting): + name = "limit_request_fields" + section = "Security" + cli = ["--limit-request-fields"] + meta = "INT" + validator = validate_pos_int + type = int + default = 100 + desc = """\ + Limit the number of HTTP header fields in a request. + + This parameter is used to limit the number of headers in a request to + help prevent DDoS attacks. Used together with *limit_request_field_size*, + it provides additional safety. By default this value is 100 and can't be + larger than 32768. + """ + + +class LimitRequestFieldSize(Setting): + name = "limit_request_field_size" + section = "Security" + cli = ["--limit-request-field_size"] + meta = "INT" + validator = validate_pos_int + type = int + default = 8190 + desc = """\ + Limit the allowed size of an HTTP request header field. + + Value is a positive number or 0. Setting it to 0 will allow unlimited + header field sizes. + + .. warning:: + Setting this parameter to a very high or unlimited value can open + the door to DDoS attacks. + """ + + +class Reload(Setting): + name = "reload" + section = 'Debugging' + cli = ['--reload'] + validator = validate_bool + action = 'store_true' + default = False + + desc = '''\ + Restart workers when code changes. + + This setting is intended for development. It will cause workers to be + restarted whenever application code changes. + + The reloader is incompatible with application preloading. When using a + paste configuration be sure that the server block does not import any + application code or the reload will not work as designed. + + The default behavior is to attempt inotify with a fallback to file + system polling. Generally, inotify should be preferred if available + because it consumes less system resources. + + .. note:: + In order to use the inotify reloader, you must have the ``inotify`` + package installed. + ''' + + +class ReloadEngine(Setting): + name = "reload_engine" + section = "Debugging" + cli = ["--reload-engine"] + meta = "STRING" + validator = validate_reload_engine + default = "auto" + desc = """\ + The implementation that should be used to power :ref:`reload`. + + Valid engines are: + + * 'auto' + * 'poll' + * 'inotify' (requires inotify) + + .. versionadded:: 19.7 + """
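For development, the two debugging settings above are typically used together; for example (the app module is hypothetical)::

    $ gunicorn --reload --reload-engine=poll myapp.wsgi:app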
+ + +class ReloadExtraFiles(Setting): + name = "reload_extra_files" + action = "append" + section = "Debugging" + cli = ["--reload-extra-file"] + meta = "FILES" + validator = validate_list_of_existing_files + default = [] + desc = """\ + Extends :ref:`reload` option to also watch and reload on additional files + (e.g., templates, configurations, specifications, etc.). + + .. versionadded:: 19.8 + """ + + +class Spew(Setting): + name = "spew" + section = "Debugging" + cli = ["--spew"] + validator = validate_bool + action = "store_true" + default = False + desc = """\ + Install a trace function that spews every line executed by the server. + + This is the nuclear option. + """ + + +class ConfigCheck(Setting): + name = "check_config" + section = "Debugging" + cli = ["--check-config"] + validator = validate_bool + action = "store_true" + default = False + desc = """\ + Check the configuration. + """ + + +class PreloadApp(Setting): + name = "preload_app" + section = "Server Mechanics" + cli = ["--preload"] + validator = validate_bool + action = "store_true" + default = False + desc = """\ + Load application code before the worker processes are forked. + + By preloading an application you can save some RAM resources as well as + speed up server boot times. If, however, you defer application loading + to each worker process, you can reload your application code easily by + restarting workers. + """ + + +class Sendfile(Setting): + name = "sendfile" + section = "Server Mechanics" + cli = ["--no-sendfile"] + validator = validate_bool + action = "store_const" + const = False + + desc = """\ + Disables the use of ``sendfile()``. + + If not set, the value of the ``SENDFILE`` environment variable is used + to enable or disable its usage. + + .. versionadded:: 19.2 + .. versionchanged:: 19.4 + Swapped ``--sendfile`` with ``--no-sendfile`` to actually allow + disabling. + .. versionchanged:: 19.6 + added support for the ``SENDFILE`` environment variable + """ + + +class ReusePort(Setting): + name = "reuse_port" + section = "Server Mechanics" + cli = ["--reuse-port"] + validator = validate_bool + action = "store_true" + default = False + + desc = """\ + Set the ``SO_REUSEPORT`` flag on the listening socket. + + .. versionadded:: 19.8 + """ + + +class Chdir(Setting): + name = "chdir" + section = "Server Mechanics" + cli = ["--chdir"] + validator = validate_chdir + default = util.getcwd() + desc = """\ + Change to the specified directory before loading apps. + """ + + +class Daemon(Setting): + name = "daemon" + section = "Server Mechanics" + cli = ["-D", "--daemon"] + validator = validate_bool + action = "store_true" + default = False + desc = """\ + Daemonize the Gunicorn process. + + Detaches the server from the controlling terminal and enters the + background. + """ + +class Env(Setting): + name = "raw_env" + action = "append" + section = "Server Mechanics" + cli = ["-e", "--env"] + meta = "ENV" + validator = validate_list_string + default = [] + + desc = """\ + Set environment variables (key=value). + + Pass variables to the execution environment. Ex.:: + + $ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app + + and test for the ``FOO`` environment variable in your application. + """ + + +class Pidfile(Setting): + name = "pidfile" + section = "Server Mechanics" + cli = ["-p", "--pid"] + meta = "FILE" + validator = validate_string + default = None + desc = """\ + A filename to use for the PID file. + + If not set, no PID file will be written. + """
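A sketch combining several of these server-mechanics settings (all paths and the app module are hypothetical)::

    $ gunicorn -D -p /tmp/gunicorn.pid --chdir /srv/myapp \
        -e APP_SETTINGS=production myapp.wsgi:app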
+ """ + +class WorkerTmpDir(Setting): + name = "worker_tmp_dir" + section = "Server Mechanics" + cli = ["--worker-tmp-dir"] + meta = "DIR" + validator = validate_string + default = None + desc = """\ + A directory to use for the worker heartbeat temporary file. + + If not set, the default temporary directory will be used. + + .. note:: + The current heartbeat system involves calling ``os.fchmod`` on + temporary file handlers and may block a worker for arbitrary time + if the directory is on a disk-backed filesystem. + + See :ref:`blocking-os-fchmod` for more detailed information + and a solution for avoiding this problem. + """ + + +class User(Setting): + name = "user" + section = "Server Mechanics" + cli = ["-u", "--user"] + meta = "USER" + validator = validate_user + default = os.geteuid() + desc = """\ + Switch worker processes to run as this user. + + A valid user id (as an integer) or the name of a user that can be + retrieved with a call to ``pwd.getpwnam(value)`` or ``None`` to not + change the worker process user. + """ + + +class Group(Setting): + name = "group" + section = "Server Mechanics" + cli = ["-g", "--group"] + meta = "GROUP" + validator = validate_group + default = os.getegid() + desc = """\ + Switch worker process to run as this group. + + A valid group id (as an integer) or the name of a user that can be + retrieved with a call to ``pwd.getgrnam(value)`` or ``None`` to not + change the worker processes group. + """ + +class Umask(Setting): + name = "umask" + section = "Server Mechanics" + cli = ["-m", "--umask"] + meta = "INT" + validator = validate_pos_int + type = auto_int + default = 0 + desc = """\ + A bit mask for the file mode on files written by Gunicorn. + + Note that this affects unix socket permissions. + + A valid value for the ``os.umask(mode)`` call or a string compatible + with ``int(value, 0)`` (``0`` means Python guesses the base, so values + like ``0``, ``0xFF``, ``0022`` are valid for decimal, hex, and octal + representations) + """ + + +class Initgroups(Setting): + name = "initgroups" + section = "Server Mechanics" + cli = ["--initgroups"] + validator = validate_bool + action = 'store_true' + default = False + + desc = """\ + If true, set the worker process's group access list with all of the + groups of which the specified username is a member, plus the specified + group id. + + .. versionadded:: 19.7 + """ + + +class TmpUploadDir(Setting): + name = "tmp_upload_dir" + section = "Server Mechanics" + meta = "DIR" + validator = validate_string + default = None + desc = """\ + Directory to store temporary request data as they are read. + + This may disappear in the near future. + + This path should be writable by the process permissions set for Gunicorn + workers. If not specified, Gunicorn will choose a system generated + temporary directory. + """ + + +class SecureSchemeHeader(Setting): + name = "secure_scheme_headers" + section = "Server Mechanics" + validator = validate_dict + default = { + "X-FORWARDED-PROTOCOL": "ssl", + "X-FORWARDED-PROTO": "https", + "X-FORWARDED-SSL": "on" + } + desc = """\ + + A dictionary containing headers and values that the front-end proxy + uses to indicate HTTPS requests. These tell Gunicorn to set + ``wsgi.url_scheme`` to ``https``, so your application can tell that the + request is secure. + + The dictionary should map upper-case header names to exact string + values. 
+ + +class ForwardedAllowIPS(Setting): + name = "forwarded_allow_ips" + section = "Server Mechanics" + cli = ["--forwarded-allow-ips"] + meta = "STRING" + validator = validate_string_to_list + default = os.environ.get("FORWARDED_ALLOW_IPS", "127.0.0.1") + desc = """\ + Front-end IPs that are allowed to set secure headers + (comma-separated). + + Set to ``*`` to disable checking of front-end IPs (useful for setups + where you don't know the front-end's IP address in advance, but you + still trust the environment). + + By default, the value of the ``FORWARDED_ALLOW_IPS`` environment + variable. If it is not defined, the default is ``"127.0.0.1"``. + """ + + +class AccessLog(Setting): + name = "accesslog" + section = "Logging" + cli = ["--access-logfile"] + meta = "FILE" + validator = validate_string + default = None + desc = """\ + The Access log file to write to. + + ``'-'`` means log to stdout. + """ + +class DisableRedirectAccessToSyslog(Setting): + name = "disable_redirect_access_to_syslog" + section = "Logging" + cli = ["--disable-redirect-access-to-syslog"] + validator = validate_bool + action = 'store_true' + default = False + desc = """\ + Disable redirecting access logs to syslog. + + .. versionadded:: 19.8 + """ + + +class AccessLogFormat(Setting): + name = "access_log_format" + section = "Logging" + cli = ["--access-logformat"] + meta = "STRING" + validator = validate_string + default = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"' + desc = """\ + The access log format. + + =========== =========== + Identifier Description + =========== =========== + h remote address + l ``'-'`` + u user name + t date of the request + r status line (e.g. ``GET / HTTP/1.1``) + m request method + U URL path without query string + q query string + H protocol + s status + B response length + b response length or ``'-'`` (CLF format) + f referer + a user agent + T request time in seconds + D request time in microseconds + L request time in decimal seconds + p process ID + {Header}i request header + {Header}o response header + {Variable}e environment variable + =========== =========== + """ + + +class ErrorLog(Setting): + name = "errorlog" + section = "Logging" + cli = ["--error-logfile", "--log-file"] + meta = "FILE" + validator = validate_string + default = '-' + desc = """\ + The Error log file to write to. + + Using ``'-'`` for FILE makes gunicorn log to stderr. + + .. versionchanged:: 19.2 + Log to stderr by default. + + """ + + +class Loglevel(Setting): + name = "loglevel" + section = "Logging" + cli = ["--log-level"] + meta = "LEVEL" + validator = validate_string + default = "info" + desc = """\ + The granularity of Error log outputs. + + Valid level names are: + + * debug + * info + * warning + * error + * critical + """ + + +class CaptureOutput(Setting): + name = "capture_output" + section = "Logging" + cli = ["--capture-output"] + validator = validate_bool + action = 'store_true' + default = False + desc = """\ + Redirect stdout/stderr to the file specified in :ref:`errorlog`. + + .. versionadded:: 19.6 + """
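These logging settings can also live in the config file; a sketch (the log path is hypothetical)::

    # gunicorn.conf.py
    accesslog = '-'                           # access log to stdout
    errorlog = '/var/log/gunicorn/error.log'
    loglevel = 'warning'
    capture_output = True                     # stray prints land in errorlog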
+ + +class LoggerClass(Setting): + name = "logger_class" + section = "Logging" + cli = ["--logger-class"] + meta = "STRING" + validator = validate_class + default = "gunicorn.glogging.Logger" + desc = """\ + The logger you want to use to log events in Gunicorn. + + The default class (``gunicorn.glogging.Logger``) handles most normal + usages in logging. It provides error and access logging. + + You can provide your own logger by giving Gunicorn a + Python path to a subclass like ``gunicorn.glogging.Logger``. + """ + + +class LogConfig(Setting): + name = "logconfig" + section = "Logging" + cli = ["--log-config"] + meta = "FILE" + validator = validate_string + default = None + desc = """\ + The log config file to use. + Gunicorn uses the standard Python logging module's Configuration + file format. + """ + + +class LogConfigDict(Setting): + name = "logconfig_dict" + section = "Logging" + cli = ["--log-config-dict"] + validator = validate_dict + default = {} + desc = """\ + The log config dictionary to use, using the standard Python + logging module's dictionary configuration format. This option + takes precedence over the :ref:`logconfig` option, which uses the + older file configuration format. + + Format: https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig + + .. versionadded:: 19.8 + """ + + +class SyslogTo(Setting): + name = "syslog_addr" + section = "Logging" + cli = ["--log-syslog-to"] + meta = "SYSLOG_ADDR" + validator = validate_string + + if PLATFORM == "darwin": + default = "unix:///var/run/syslog" + elif PLATFORM in ('freebsd', 'dragonfly', ): + default = "unix:///var/run/log" + elif PLATFORM == "openbsd": + default = "unix:///dev/log" + else: + default = "udp://localhost:514" + + desc = """\ + Address to send syslog messages. + + Address is a string of the form: + + * ``unix://PATH#TYPE`` : for unix domain socket. ``TYPE`` can be ``stream`` + for the stream driver or ``dgram`` for the dgram driver. + ``stream`` is the default. + * ``udp://HOST:PORT`` : for UDP sockets + * ``tcp://HOST:PORT`` : for TCP sockets + + """ + + +class Syslog(Setting): + name = "syslog" + section = "Logging" + cli = ["--log-syslog"] + validator = validate_bool + action = 'store_true' + default = False + desc = """\ + Send *Gunicorn* logs to syslog. + + .. versionchanged:: 19.8 + You can now disable sending access logs by using the + :ref:`disable-redirect-access-to-syslog` setting. + """ + + +class SyslogPrefix(Setting): + name = "syslog_prefix" + section = "Logging" + cli = ["--log-syslog-prefix"] + meta = "SYSLOG_PREFIX" + validator = validate_string + default = None + desc = """\ + Makes Gunicorn use the parameter as the program name in the syslog entries. + + All entries will be prefixed by ``gunicorn.``. By default the + program name is the name of the process. + """ + + +class SyslogFacility(Setting): + name = "syslog_facility" + section = "Logging" + cli = ["--log-syslog-facility"] + meta = "SYSLOG_FACILITY" + validator = validate_string + default = "user" + desc = """\ + Syslog facility name + """ + + +class EnableStdioInheritance(Setting): + name = "enable_stdio_inheritance" + section = "Logging" + cli = ["-R", "--enable-stdio-inheritance"] + validator = validate_bool + default = False + action = "store_true" + desc = """\ + Enable stdio inheritance. + + Enable inheritance for stdio file descriptors in daemon mode. + + Note: To disable Python stdout buffering, you can set the + environment variable ``PYTHONUNBUFFERED``. + """
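For example, a ``logconfig_dict`` override might look like the following sketch; the logger names come from the defaults shipped in ``gunicorn.glogging``, and note that top-level keys you supply replace the corresponding defaults wholesale, since the dict is merged with ``dict.update``::

    # gunicorn.conf.py
    logconfig_dict = {
        'version': 1,
        'disable_existing_loggers': False,
        'loggers': {
            'gunicorn.access': {'level': 'WARNING'},  # quiet the access logger
        },
    }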
+ """ + + +# statsD monitoring +class StatsdHost(Setting): + name = "statsd_host" + section = "Logging" + cli = ["--statsd-host"] + meta = "STATSD_ADDR" + default = None + validator = validate_hostport + desc = """\ + ``host:port`` of the statsd server to log to. + + .. versionadded:: 19.1 + """ + +class StatsdPrefix(Setting): + name = "statsd_prefix" + section = "Logging" + cli = ["--statsd-prefix"] + meta = "STATSD_PREFIX" + default = "" + validator = validate_string + desc = """\ + Prefix to use when emitting statsd metrics (a trailing ``.`` is added, + if not provided). + + .. versionadded:: 19.2 + """ + + +class Procname(Setting): + name = "proc_name" + section = "Process Naming" + cli = ["-n", "--name"] + meta = "STRING" + validator = validate_string + default = None + desc = """\ + A base to use with setproctitle for process naming. + + This affects things like ``ps`` and ``top``. If you're going to be + running more than one instance of Gunicorn you'll probably want to set a + name to tell them apart. This requires that you install the setproctitle + module. + + If not set, the *default_proc_name* setting will be used. + """ + + +class DefaultProcName(Setting): + name = "default_proc_name" + section = "Process Naming" + validator = validate_string + default = "gunicorn" + desc = """\ + Internal setting that is adjusted for each type of application. + """ + + +class PythonPath(Setting): + name = "pythonpath" + section = "Server Mechanics" + cli = ["--pythonpath"] + meta = "STRING" + validator = validate_string + default = None + desc = """\ + A comma-separated list of directories to add to the Python path. + + e.g. + ``'/home/djangoprojects/myproject,/home/python/mylibrary'``. + """ + + +class Paste(Setting): + name = "paste" + section = "Server Mechanics" + cli = ["--paste", "--paster"] + meta = "STRING" + validator = validate_string + default = None + desc = """\ + Load a PasteDeploy config file. The argument may contain a ``#`` + symbol followed by the name of an app section from the config file, + e.g. ``production.ini#admin``. + + At this time, using alternate server blocks is not supported. Use the + command line arguments to control server configuration instead. + """ + + +class OnStarting(Setting): + name = "on_starting" + section = "Server Hooks" + validator = validate_callable(1) + type = six.callable + + def on_starting(server): + pass + default = staticmethod(on_starting) + desc = """\ + Called just before the master process is initialized. + + The callable needs to accept a single instance variable for the Arbiter. + """ + + +class OnReload(Setting): + name = "on_reload" + section = "Server Hooks" + validator = validate_callable(1) + type = six.callable + + def on_reload(server): + pass + default = staticmethod(on_reload) + desc = """\ + Called to recycle workers during a reload via SIGHUP. + + The callable needs to accept a single instance variable for the Arbiter. + """ + + +class WhenReady(Setting): + name = "when_ready" + section = "Server Hooks" + validator = validate_callable(1) + type = six.callable + + def when_ready(server): + pass + default = staticmethod(when_ready) + desc = """\ + Called just after the server is started. + + The callable needs to accept a single instance variable for the Arbiter. 
+ """ + + +class Prefork(Setting): + name = "pre_fork" + section = "Server Hooks" + validator = validate_callable(2) + type = six.callable + + def pre_fork(server, worker): + pass + default = staticmethod(pre_fork) + desc = """\ + Called just before a worker is forked. + + The callable needs to accept two instance variables for the Arbiter and + new Worker. + """ + + +class Postfork(Setting): + name = "post_fork" + section = "Server Hooks" + validator = validate_callable(2) + type = six.callable + + def post_fork(server, worker): + pass + default = staticmethod(post_fork) + desc = """\ + Called just after a worker has been forked. + + The callable needs to accept two instance variables for the Arbiter and + new Worker. + """ + + +class PostWorkerInit(Setting): + name = "post_worker_init" + section = "Server Hooks" + validator = validate_callable(1) + type = six.callable + + def post_worker_init(worker): + pass + + default = staticmethod(post_worker_init) + desc = """\ + Called just after a worker has initialized the application. + + The callable needs to accept one instance variable for the initialized + Worker. + """ + +class WorkerInt(Setting): + name = "worker_int" + section = "Server Hooks" + validator = validate_callable(1) + type = six.callable + + def worker_int(worker): + pass + + default = staticmethod(worker_int) + desc = """\ + Called just after a worker exited on SIGINT or SIGQUIT. + + The callable needs to accept one instance variable for the initialized + Worker. + """ + + +class WorkerAbort(Setting): + name = "worker_abort" + section = "Server Hooks" + validator = validate_callable(1) + type = six.callable + + def worker_abort(worker): + pass + + default = staticmethod(worker_abort) + desc = """\ + Called when a worker received the SIGABRT signal. + + This call generally happens on timeout. + + The callable needs to accept one instance variable for the initialized + Worker. + """ + + +class PreExec(Setting): + name = "pre_exec" + section = "Server Hooks" + validator = validate_callable(1) + type = six.callable + + def pre_exec(server): + pass + default = staticmethod(pre_exec) + desc = """\ + Called just before a new master process is forked. + + The callable needs to accept a single instance variable for the Arbiter. + """ + + +class PreRequest(Setting): + name = "pre_request" + section = "Server Hooks" + validator = validate_callable(2) + type = six.callable + + def pre_request(worker, req): + worker.log.debug("%s %s" % (req.method, req.path)) + default = staticmethod(pre_request) + desc = """\ + Called just before a worker processes the request. + + The callable needs to accept two instance variables for the Worker and + the Request. + """ + + +class PostRequest(Setting): + name = "post_request" + section = "Server Hooks" + validator = validate_post_request + type = six.callable + + def post_request(worker, req, environ, resp): + pass + default = staticmethod(post_request) + desc = """\ + Called after a worker processes the request. + + The callable needs to accept two instance variables for the Worker and + the Request. + """ + + +class ChildExit(Setting): + name = "child_exit" + section = "Server Hooks" + validator = validate_callable(2) + type = six.callable + + def child_exit(server, worker): + pass + default = staticmethod(child_exit) + desc = """\ + Called just after a worker has been exited, in the master process. + + The callable needs to accept two instance variables for the Arbiter and + the just-exited Worker. + + .. 
+ + +class WorkerExit(Setting): + name = "worker_exit" + section = "Server Hooks" + validator = validate_callable(2) + type = six.callable + + def worker_exit(server, worker): + pass + default = staticmethod(worker_exit) + desc = """\ + Called just after a worker has exited, in the worker process. + + The callable needs to accept two instance variables for the Arbiter and + the just-exited Worker. + """ + + +class NumWorkersChanged(Setting): + name = "nworkers_changed" + section = "Server Hooks" + validator = validate_callable(3) + type = six.callable + + def nworkers_changed(server, new_value, old_value): + pass + default = staticmethod(nworkers_changed) + desc = """\ + Called just after *num_workers* has been changed. + + The callable needs to accept an instance variable of the Arbiter and + two integers: the number of workers after and before the change. + + If the number of workers is set for the first time, *old_value* would + be ``None``. + """ + +class OnExit(Setting): + name = "on_exit" + section = "Server Hooks" + validator = validate_callable(1) + + def on_exit(server): + pass + + default = staticmethod(on_exit) + desc = """\ + Called just before exiting Gunicorn. + + The callable needs to accept a single instance variable for the Arbiter. + """ + + +class ProxyProtocol(Setting): + name = "proxy_protocol" + section = "Server Mechanics" + cli = ["--proxy-protocol"] + validator = validate_bool + default = False + action = "store_true" + desc = """\ + Enable PROXY protocol detection (PROXY mode). + + Allows using HTTP and the PROXY protocol together. It may be useful when + working with stunnel as an HTTPS frontend and Gunicorn as an HTTP server. + + PROXY protocol: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt + + Example for stunnel config:: + + [https] + protocol = proxy + accept = 443 + connect = 80 + cert = /etc/ssl/certs/stunnel.pem + key = /etc/ssl/certs/stunnel.key + """ + + +class ProxyAllowFrom(Setting): + name = "proxy_allow_ips" + section = "Server Mechanics" + cli = ["--proxy-allow-from"] + validator = validate_string_to_list + default = "127.0.0.1" + desc = """\ + Front-end IPs from which proxy requests are accepted (comma-separated). + + Set to ``*`` to disable checking of front-end IPs (useful for setups + where you don't know the front-end's IP address in advance, but you + still trust the environment). + """ + + +class KeyFile(Setting): + name = "keyfile" + section = "SSL" + cli = ["--keyfile"] + meta = "FILE" + validator = validate_string + default = None + desc = """\ + SSL key file + """ + + +class CertFile(Setting): + name = "certfile" + section = "SSL" + cli = ["--certfile"] + meta = "FILE" + validator = validate_string + default = None + desc = """\ + SSL certificate file + """ + +class SSLVersion(Setting): + name = "ssl_version" + section = "SSL" + cli = ["--ssl-version"] + validator = validate_pos_int + default = ssl.PROTOCOL_SSLv23 + desc = """\ + SSL version to use (see the stdlib ``ssl`` module) + + .. versionchanged:: 19.7 + The default value has been changed from ``ssl.PROTOCOL_TLSv1`` to + ``ssl.PROTOCOL_SSLv23``. + """
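With a key pair on disk, the SSL settings above are enough to terminate TLS in Gunicorn itself (paths and app module hypothetical)::

    $ gunicorn --certfile=/etc/ssl/certs/myapp.pem \
        --keyfile=/etc/ssl/private/myapp.key -b 0.0.0.0:443 myapp.wsgi:app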
+ """ + +class CertReqs(Setting): + name = "cert_reqs" + section = "SSL" + cli = ["--cert-reqs"] + validator = validate_pos_int + default = ssl.CERT_NONE + desc = """\ + Whether client certificate is required (see stdlib ssl module's) + """ + +class CACerts(Setting): + name = "ca_certs" + section = "SSL" + cli = ["--ca-certs"] + meta = "FILE" + validator = validate_string + default = None + desc = """\ + CA certificates file + """ + +class SuppressRaggedEOFs(Setting): + name = "suppress_ragged_eofs" + section = "SSL" + cli = ["--suppress-ragged-eofs"] + action = "store_true" + default = True + validator = validate_bool + desc = """\ + Suppress ragged EOFs (see stdlib ssl module's) + """ + +class DoHandshakeOnConnect(Setting): + name = "do_handshake_on_connect" + section = "SSL" + cli = ["--do-handshake-on-connect"] + validator = validate_bool + action = "store_true" + default = False + desc = """\ + Whether to perform SSL handshake on socket connect (see stdlib ssl module's) + """ + + +if sys.version_info >= (2, 7): + class Ciphers(Setting): + name = "ciphers" + section = "SSL" + cli = ["--ciphers"] + validator = validate_string + default = 'TLSv1' + desc = """\ + Ciphers to use (see stdlib ssl module's) + """ + + +class PasteGlobalConf(Setting): + name = "raw_paste_global_conf" + action = "append" + section = "Server Mechanics" + cli = ["--paste-global"] + meta = "CONF" + validator = validate_list_string + default = [] + + desc = """\ + Set a PasteDeploy global config variable in ``key=value`` form. + + The option can be specified multiple times. + + The variables are passed to the the PasteDeploy entrypoint. Example:: + + $ gunicorn -b 127.0.0.1:8000 --paste development.ini --paste-global FOO=1 --paste-global BAR=2 + + .. versionadded:: 19.7 + """ diff --git a/venv/lib/python3.6/site-packages/gunicorn/debug.py b/venv/lib/python3.6/site-packages/gunicorn/debug.py new file mode 100644 index 0000000..996fe1b --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/debug.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +"""The debug module contains utilities and functions for better +debugging Gunicorn.""" + +import sys +import linecache +import re +import inspect + +__all__ = ['spew', 'unspew'] + +_token_spliter = re.compile(r'\W+') + + +class Spew(object): + + def __init__(self, trace_names=None, show_values=True): + self.trace_names = trace_names + self.show_values = show_values + + def __call__(self, frame, event, arg): + if event == 'line': + lineno = frame.f_lineno + if '__file__' in frame.f_globals: + filename = frame.f_globals['__file__'] + if (filename.endswith('.pyc') or + filename.endswith('.pyo')): + filename = filename[:-1] + name = frame.f_globals['__name__'] + line = linecache.getline(filename, lineno) + else: + name = '[unknown]' + try: + src = inspect.getsourcelines(frame) + line = src[lineno] + except IOError: + line = 'Unknown code named [%s]. 
VM instruction #%d' % ( + frame.f_code.co_name, frame.f_lasti) + if self.trace_names is None or name in self.trace_names: + print('%s:%s: %s' % (name, lineno, line.rstrip())) + if not self.show_values: + return self + details = [] + tokens = _token_spliter.split(line) + for tok in tokens: + if tok in frame.f_globals: + details.append('%s=%r' % (tok, frame.f_globals[tok])) + if tok in frame.f_locals: + details.append('%s=%r' % (tok, frame.f_locals[tok])) + if details: + print("\t%s" % ' '.join(details)) + return self + + +def spew(trace_names=None, show_values=False): + """Install a trace hook which writes incredibly detailed logs + about what code is being executed to stdout. + """ + sys.settrace(Spew(trace_names, show_values)) + + +def unspew(): + """Remove the trace hook installed by spew. + """ + sys.settrace(None) diff --git a/venv/lib/python3.6/site-packages/gunicorn/errors.py b/venv/lib/python3.6/site-packages/gunicorn/errors.py new file mode 100644 index 0000000..727d336 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/errors.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +# We don't need to call super() in __init__ methods of our +# BaseException and Exception classes because we also define +# our own __str__ methods so there is no need to pass 'message' +# to the base class to get a meaningful output from 'str(exc)'. +# pylint: disable=super-init-not-called + + +# we inherit from BaseException here to make sure to not be caught +# at application level +class HaltServer(BaseException): + def __init__(self, reason, exit_status=1): + self.reason = reason + self.exit_status = exit_status + + def __str__(self): + return "<HaltServer %r %d>" % (self.reason, self.exit_status) + + +class ConfigError(Exception): + """ Exception raised on config error """ + + +class AppImportError(Exception): + """ Exception raised when loading an application """ diff --git a/venv/lib/python3.6/site-packages/gunicorn/glogging.py b/venv/lib/python3.6/site-packages/gunicorn/glogging.py new file mode 100644 index 0000000..041a74d --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/glogging.py @@ -0,0 +1,478 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. 
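The ``spew``/``unspew`` pair defined in debug.py above is meant to bracket a suspect region of code; a sketch (the module and function names are hypothetical)::

    from gunicorn import debug

    debug.spew(trace_names=['myapp.views'])  # only trace frames from this module
    run_suspect_code()                       # the code being investigated
    debug.unspew()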
+ +import base64 +import binascii +import time +import logging +logging.Logger.manager.emittedNoHandlerWarning = 1 +from logging.config import fileConfig +try: + from logging.config import dictConfig +except ImportError: + # python 2.6 + dictConfig = None +import os +import socket +import sys +import threading +import traceback + +from gunicorn import util +from gunicorn.six import PY3, string_types + + +# syslog facility codes +SYSLOG_FACILITIES = { + "auth": 4, + "authpriv": 10, + "cron": 9, + "daemon": 3, + "ftp": 11, + "kern": 0, + "lpr": 6, + "mail": 2, + "news": 7, + "security": 4, # DEPRECATED + "syslog": 5, + "user": 1, + "uucp": 8, + "local0": 16, + "local1": 17, + "local2": 18, + "local3": 19, + "local4": 20, + "local5": 21, + "local6": 22, + "local7": 23 + } + + +CONFIG_DEFAULTS = dict( + version=1, + disable_existing_loggers=False, + + loggers={ + "root": {"level": "INFO", "handlers": ["console"]}, + "gunicorn.error": { + "level": "INFO", + "handlers": ["error_console"], + "propagate": True, + "qualname": "gunicorn.error" + }, + + "gunicorn.access": { + "level": "INFO", + "handlers": ["console"], + "propagate": True, + "qualname": "gunicorn.access" + } + }, + handlers={ + "console": { + "class": "logging.StreamHandler", + "formatter": "generic", + "stream": "ext://sys.stdout" + }, + "error_console": { + "class": "logging.StreamHandler", + "formatter": "generic", + "stream": "ext://sys.stderr" + }, + }, + formatters={ + "generic": { + "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s", + "datefmt": "[%Y-%m-%d %H:%M:%S %z]", + "class": "logging.Formatter" + } + } +) + + +def loggers(): + """ get list of all loggers """ + root = logging.root + existing = root.manager.loggerDict.keys() + return [logging.getLogger(name) for name in existing] + + +class SafeAtoms(dict): + + def __init__(self, atoms): + dict.__init__(self) + for key, value in atoms.items(): + if isinstance(value, string_types): + self[key] = value.replace('"', '\\"') + else: + self[key] = value + + def __getitem__(self, k): + if k.startswith("{"): + kl = k.lower() + if kl in self: + return super(SafeAtoms, self).__getitem__(kl) + else: + return "-" + if k in self: + return super(SafeAtoms, self).__getitem__(k) + else: + return '-' + + +def parse_syslog_address(addr): + + # unix domain socket type depends on backend + # SysLogHandler will try both when given None + if addr.startswith("unix://"): + sock_type = None + + # set socket type only if explicitly requested + parts = addr.split("#", 1) + if len(parts) == 2: + addr = parts[0] + if parts[1] == "dgram": + sock_type = socket.SOCK_DGRAM + + return (sock_type, addr.split("unix://")[1]) + + if addr.startswith("udp://"): + addr = addr.split("udp://")[1] + socktype = socket.SOCK_DGRAM + elif addr.startswith("tcp://"): + addr = addr.split("tcp://")[1] + socktype = socket.SOCK_STREAM + else: + raise RuntimeError("invalid syslog address") + + if '[' in addr and ']' in addr: + host = addr.split(']')[0][1:].lower() + elif ':' in addr: + host = addr.split(':')[0].lower() + elif addr == "": + host = "localhost" + else: + host = addr.lower() + + addr = addr.split(']')[-1] + if ":" in addr: + port = addr.split(':', 1)[1] + if not port.isdigit(): + raise RuntimeError("%r is not a valid port number." 
% port) + port = int(port) + else: + port = 514 + + return (socktype, (host, port)) + + +class Logger(object): + + LOG_LEVELS = { + "critical": logging.CRITICAL, + "error": logging.ERROR, + "warning": logging.WARNING, + "info": logging.INFO, + "debug": logging.DEBUG + } + loglevel = logging.INFO + + error_fmt = r"%(asctime)s [%(process)d] [%(levelname)s] %(message)s" + datefmt = r"[%Y-%m-%d %H:%M:%S %z]" + + access_fmt = "%(message)s" + syslog_fmt = "[%(process)d] %(message)s" + + atoms_wrapper_class = SafeAtoms + + def __init__(self, cfg): + self.error_log = logging.getLogger("gunicorn.error") + self.error_log.propagate = False + self.access_log = logging.getLogger("gunicorn.access") + self.access_log.propagate = False + self.error_handlers = [] + self.access_handlers = [] + self.logfile = None + self.lock = threading.Lock() + self.cfg = cfg + self.setup(cfg) + + def setup(self, cfg): + self.loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO) + self.error_log.setLevel(self.loglevel) + self.access_log.setLevel(logging.INFO) + + # set gunicorn.error handler + if self.cfg.capture_output and cfg.errorlog != "-": + for stream in sys.stdout, sys.stderr: + stream.flush() + + self.logfile = open(cfg.errorlog, 'a+') + os.dup2(self.logfile.fileno(), sys.stdout.fileno()) + os.dup2(self.logfile.fileno(), sys.stderr.fileno()) + + self._set_handler(self.error_log, cfg.errorlog, + logging.Formatter(self.error_fmt, self.datefmt)) + + # set gunicorn.access handler + if cfg.accesslog is not None: + self._set_handler(self.access_log, cfg.accesslog, + fmt=logging.Formatter(self.access_fmt), stream=sys.stdout) + + # set syslog handler + if cfg.syslog: + self._set_syslog_handler( + self.error_log, cfg, self.syslog_fmt, "error" + ) + if not cfg.disable_redirect_access_to_syslog: + self._set_syslog_handler( + self.access_log, cfg, self.syslog_fmt, "access" + ) + + if dictConfig is None and cfg.logconfig_dict: + util.warn("Dictionary-based log configuration requires " + "Python 2.7 or above.") + + if dictConfig and cfg.logconfig_dict: + config = CONFIG_DEFAULTS.copy() + config.update(cfg.logconfig_dict) + try: + dictConfig(config) + except ( + AttributeError, + ImportError, + ValueError, + TypeError + ) as exc: + raise RuntimeError(str(exc)) + elif cfg.logconfig: + if os.path.exists(cfg.logconfig): + defaults = CONFIG_DEFAULTS.copy() + defaults['__file__'] = cfg.logconfig + defaults['here'] = os.path.dirname(cfg.logconfig) + fileConfig(cfg.logconfig, defaults=defaults, + disable_existing_loggers=False) + else: + msg = "Error: log config '%s' not found" + raise RuntimeError(msg % cfg.logconfig) + + def critical(self, msg, *args, **kwargs): + self.error_log.critical(msg, *args, **kwargs) + + def error(self, msg, *args, **kwargs): + self.error_log.error(msg, *args, **kwargs) + + def warning(self, msg, *args, **kwargs): + self.error_log.warning(msg, *args, **kwargs) + + def info(self, msg, *args, **kwargs): + self.error_log.info(msg, *args, **kwargs) + + def debug(self, msg, *args, **kwargs): + self.error_log.debug(msg, *args, **kwargs) + + def exception(self, msg, *args, **kwargs): + self.error_log.exception(msg, *args, **kwargs) + + def log(self, lvl, msg, *args, **kwargs): + if isinstance(lvl, string_types): + lvl = self.LOG_LEVELS.get(lvl.lower(), logging.INFO) + self.error_log.log(lvl, msg, *args, **kwargs) + + def atoms(self, resp, req, environ, request_time): + """ Gets atoms for log formating. 
+ """ + status = resp.status + if isinstance(status, str): + status = status.split(None, 1)[0] + atoms = { + 'h': environ.get('REMOTE_ADDR', '-'), + 'l': '-', + 'u': self._get_user(environ) or '-', + 't': self.now(), + 'r': "%s %s %s" % (environ['REQUEST_METHOD'], + environ['RAW_URI'], environ["SERVER_PROTOCOL"]), + 's': status, + 'm': environ.get('REQUEST_METHOD'), + 'U': environ.get('PATH_INFO'), + 'q': environ.get('QUERY_STRING'), + 'H': environ.get('SERVER_PROTOCOL'), + 'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-', + 'B': getattr(resp, 'sent', None), + 'f': environ.get('HTTP_REFERER', '-'), + 'a': environ.get('HTTP_USER_AGENT', '-'), + 'T': request_time.seconds, + 'D': (request_time.seconds*1000000) + request_time.microseconds, + 'L': "%d.%06d" % (request_time.seconds, request_time.microseconds), + 'p': "<%s>" % os.getpid() + } + + # add request headers + if hasattr(req, 'headers'): + req_headers = req.headers + else: + req_headers = req + + if hasattr(req_headers, "items"): + req_headers = req_headers.items() + + atoms.update(dict([("{%s}i" % k.lower(), v) for k, v in req_headers])) + + resp_headers = resp.headers + if hasattr(resp_headers, "items"): + resp_headers = resp_headers.items() + + # add response headers + atoms.update(dict([("{%s}o" % k.lower(), v) for k, v in resp_headers])) + + # add environ variables + environ_variables = environ.items() + atoms.update(dict([("{%s}e" % k.lower(), v) for k, v in environ_variables])) + + return atoms + + def access(self, resp, req, environ, request_time): + """ See http://httpd.apache.org/docs/2.0/logs.html#combined + for format details + """ + + if not (self.cfg.accesslog or self.cfg.logconfig or + self.cfg.logconfig_dict or + (self.cfg.syslog and not self.cfg.disable_redirect_access_to_syslog)): + return + + # wrap atoms: + # - make sure atoms will be test case insensitively + # - if atom doesn't exist replace it by '-' + safe_atoms = self.atoms_wrapper_class(self.atoms(resp, req, environ, + request_time)) + + try: + self.access_log.info(self.cfg.access_log_format, safe_atoms) + except: + self.error(traceback.format_exc()) + + def now(self): + """ return date in Apache Common Log Format """ + return time.strftime('[%d/%b/%Y:%H:%M:%S %z]') + + def reopen_files(self): + if self.cfg.capture_output and self.cfg.errorlog != "-": + for stream in sys.stdout, sys.stderr: + stream.flush() + + with self.lock: + if self.logfile is not None: + self.logfile.close() + self.logfile = open(self.cfg.errorlog, 'a+') + os.dup2(self.logfile.fileno(), sys.stdout.fileno()) + os.dup2(self.logfile.fileno(), sys.stderr.fileno()) + + + for log in loggers(): + for handler in log.handlers: + if isinstance(handler, logging.FileHandler): + handler.acquire() + try: + if handler.stream: + handler.close() + handler.stream = handler._open() + finally: + handler.release() + + def close_on_exec(self): + for log in loggers(): + for handler in log.handlers: + if isinstance(handler, logging.FileHandler): + handler.acquire() + try: + if handler.stream: + util.close_on_exec(handler.stream.fileno()) + finally: + handler.release() + + def _get_gunicorn_handler(self, log): + for h in log.handlers: + if getattr(h, "_gunicorn", False): + return h + + def _set_handler(self, log, output, fmt, stream=None): + # remove previous gunicorn log handler + h = self._get_gunicorn_handler(log) + if h: + log.handlers.remove(h) + + if output is not None: + if output == "-": + h = logging.StreamHandler(stream) + else: + util.check_is_writeable(output) + h = 
logging.FileHandler(output) + # make sure the user can reopen the file + try: + os.chown(h.baseFilename, self.cfg.user, self.cfg.group) + except OSError: + # it's probably OK there, we assume the user has given + # /dev/null as a parameter. + pass + + h.setFormatter(fmt) + h._gunicorn = True + log.addHandler(h) + + def _set_syslog_handler(self, log, cfg, fmt, name): + # setup format + if not cfg.syslog_prefix: + prefix = cfg.proc_name.replace(":", ".") + else: + prefix = cfg.syslog_prefix + + prefix = "gunicorn.%s.%s" % (prefix, name) + + # set format + fmt = logging.Formatter(r"%s: %s" % (prefix, fmt)) + + # syslog facility + try: + facility = SYSLOG_FACILITIES[cfg.syslog_facility.lower()] + except KeyError: + raise RuntimeError("unknown facility name") + + # parse syslog address + socktype, addr = parse_syslog_address(cfg.syslog_addr) + + # finally setup the syslog handler + if sys.version_info >= (2, 7): + h = logging.handlers.SysLogHandler(address=addr, + facility=facility, socktype=socktype) + else: + # socktype is only supported in 2.7 and sup + # fix issue #541 + h = logging.handlers.SysLogHandler(address=addr, + facility=facility) + + h.setFormatter(fmt) + h._gunicorn = True + log.addHandler(h) + + def _get_user(self, environ): + user = None + http_auth = environ.get("HTTP_AUTHORIZATION") + if http_auth and http_auth.startswith('Basic'): + auth = http_auth.split(" ", 1) + if len(auth) == 2: + try: + # b64decode doesn't accept unicode in Python < 3.3 + # so we need to convert it to a byte string + auth = base64.b64decode(auth[1].strip().encode('utf-8')) + if PY3: # b64decode returns a byte string in Python 3 + auth = auth.decode('utf-8') + auth = auth.split(":", 1) + except (TypeError, binascii.Error, UnicodeDecodeError) as exc: + self.debug("Couldn't get username: %s", exc) + return user + if len(auth) == 2: + user = auth[0] + return user diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/__init__.py b/venv/lib/python3.6/site-packages/gunicorn/http/__init__.py new file mode 100644 index 0000000..1da6f3e --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/http/__init__.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. 
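The Basic-auth decoding done by _get_user() above boils down to a few lines of stdlib code. A standalone sketch (the header value is illustrative, not part of gunicorn)::

    import base64

    http_auth = "Basic dXNlcjpzM2NyZXQ="   # "user:s3cret", base64-encoded
    scheme, _, payload = http_auth.partition(" ")
    user = None
    if scheme == "Basic" and payload:
        decoded = base64.b64decode(payload.strip().encode("utf-8")).decode("utf-8")
        user = decoded.split(":", 1)[0]
    print(user)                             # -> user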
+ +from gunicorn.http.message import Message, Request +from gunicorn.http.parser import RequestParser + +__all__ = ['Message', 'Request', 'RequestParser'] diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/__init__.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..4d30725 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/_sendfile.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/_sendfile.cpython-36.pyc new file mode 100644 index 0000000..02cf7b7 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/_sendfile.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/body.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/body.cpython-36.pyc new file mode 100644 index 0000000..be9975e Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/body.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/errors.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/errors.cpython-36.pyc new file mode 100644 index 0000000..37ca055 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/errors.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/message.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/message.cpython-36.pyc new file mode 100644 index 0000000..2c2a480 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/message.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/parser.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/parser.cpython-36.pyc new file mode 100644 index 0000000..5e8750b Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/parser.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/unreader.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/unreader.cpython-36.pyc new file mode 100644 index 0000000..75308f3 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/unreader.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/wsgi.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/wsgi.cpython-36.pyc new file mode 100644 index 0000000..a127bac Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/http/__pycache__/wsgi.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/_sendfile.py b/venv/lib/python3.6/site-packages/gunicorn/http/_sendfile.py new file mode 100644 index 0000000..1764cb3 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/http/_sendfile.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. 
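Since gunicorn/http/__init__.py above only re-exports Message, Request and RequestParser, a complete request can be parsed straight from byte chunks. A sketch, assuming the vendored gunicorn and its default Config are importable::

    from gunicorn.config import Config
    from gunicorn.http import RequestParser

    chunks = [b"GET /ping?x=1 HTTP/1.1\r\nHost: example.org\r\n\r\n"]
    parser = RequestParser(Config(), iter(chunks))
    req = next(iter(parser))
    print(req.method, req.path, req.query, req.version)   # GET /ping x=1 (1, 1)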
+ +import errno +import os +import sys + +try: + import ctypes + import ctypes.util +except MemoryError: + # selinux execmem denial + # https://bugzilla.redhat.com/show_bug.cgi?id=488396 + raise ImportError + +SUPPORTED_PLATFORMS = ( + 'darwin', + 'freebsd', + 'dragonfly', + 'linux2') + +if sys.platform not in SUPPORTED_PLATFORMS: + raise ImportError("sendfile isn't supported on this platform") + +_libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True) +_sendfile = _libc.sendfile + + +def sendfile(fdout, fdin, offset, nbytes): + if sys.platform == 'darwin': + _sendfile.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_uint64, + ctypes.POINTER(ctypes.c_uint64), ctypes.c_voidp, + ctypes.c_int] + _nbytes = ctypes.c_uint64(nbytes) + result = _sendfile(fdin, fdout, offset, _nbytes, None, 0) + + if result == -1: + e = ctypes.get_errno() + if e == errno.EAGAIN and _nbytes.value is not None: + return _nbytes.value + raise OSError(e, os.strerror(e)) + return _nbytes.value + elif sys.platform in ('freebsd', 'dragonfly',): + _sendfile.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_uint64, + ctypes.c_uint64, ctypes.c_voidp, + ctypes.POINTER(ctypes.c_uint64), ctypes.c_int] + _sbytes = ctypes.c_uint64() + result = _sendfile(fdin, fdout, offset, nbytes, None, _sbytes, 0) + if result == -1: + e = ctypes.get_errno() + if e == errno.EAGAIN and _sbytes.value is not None: + return _sbytes.value + raise OSError(e, os.strerror(e)) + return _sbytes.value + + else: + _sendfile.argtypes = [ctypes.c_int, ctypes.c_int, + ctypes.POINTER(ctypes.c_uint64), ctypes.c_size_t] + + _offset = ctypes.c_uint64(offset) + sent = _sendfile(fdout, fdin, _offset, nbytes) + if sent == -1: + e = ctypes.get_errno() + raise OSError(e, os.strerror(e)) + return sent diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/body.py b/venv/lib/python3.6/site-packages/gunicorn/http/body.py new file mode 100644 index 0000000..fb8633e --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/http/body.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. 
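The ctypes shim in _sendfile.py above mirrors what os.sendfile() offers on Python 3.3+. Either way the call may send fewer bytes than asked, so callers drive it in a retry loop along these lines (a sketch; sock and fd stand for a connected socket and an open file descriptor)::

    import os

    def sendfile_all(sock, fd, offset, count):
        # sendfile() may transfer less than requested; loop until done
        sent = 0
        while sent < count:
            sent += os.sendfile(sock.fileno(), fd, offset + sent, count - sent)
        return sent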
+ +from gunicorn.http.errors import (NoMoreData, ChunkMissingTerminator, + InvalidChunkSize) +from gunicorn import six + + +class ChunkedReader(object): + def __init__(self, req, unreader): + self.req = req + self.parser = self.parse_chunked(unreader) + self.buf = six.BytesIO() + + def read(self, size): + if not isinstance(size, six.integer_types): + raise TypeError("size must be an integral type") + if size < 0: + raise ValueError("Size must be positive.") + if size == 0: + return b"" + + if self.parser: + while self.buf.tell() < size: + try: + self.buf.write(six.next(self.parser)) + except StopIteration: + self.parser = None + break + + data = self.buf.getvalue() + ret, rest = data[:size], data[size:] + self.buf = six.BytesIO() + self.buf.write(rest) + return ret + + def parse_trailers(self, unreader, data): + buf = six.BytesIO() + buf.write(data) + + idx = buf.getvalue().find(b"\r\n\r\n") + done = buf.getvalue()[:2] == b"\r\n" + while idx < 0 and not done: + self.get_data(unreader, buf) + idx = buf.getvalue().find(b"\r\n\r\n") + done = buf.getvalue()[:2] == b"\r\n" + if done: + unreader.unread(buf.getvalue()[2:]) + return b"" + self.req.trailers = self.req.parse_headers(buf.getvalue()[:idx]) + unreader.unread(buf.getvalue()[idx + 4:]) + + def parse_chunked(self, unreader): + (size, rest) = self.parse_chunk_size(unreader) + while size > 0: + while size > len(rest): + size -= len(rest) + yield rest + rest = unreader.read() + if not rest: + raise NoMoreData() + yield rest[:size] + # Remove \r\n after chunk + rest = rest[size:] + while len(rest) < 2: + rest += unreader.read() + if rest[:2] != b'\r\n': + raise ChunkMissingTerminator(rest[:2]) + (size, rest) = self.parse_chunk_size(unreader, data=rest[2:]) + + def parse_chunk_size(self, unreader, data=None): + buf = six.BytesIO() + if data is not None: + buf.write(data) + + idx = buf.getvalue().find(b"\r\n") + while idx < 0: + self.get_data(unreader, buf) + idx = buf.getvalue().find(b"\r\n") + + data = buf.getvalue() + line, rest_chunk = data[:idx], data[idx + 2:] + + chunk_size = line.split(b";", 1)[0].strip() + try: + chunk_size = int(chunk_size, 16) + except ValueError: + raise InvalidChunkSize(chunk_size) + + if chunk_size == 0: + try: + self.parse_trailers(unreader, rest_chunk) + except NoMoreData: + pass + return (0, None) + return (chunk_size, rest_chunk) + + def get_data(self, unreader, buf): + data = unreader.read() + if not data: + raise NoMoreData() + buf.write(data) + + +class LengthReader(object): + def __init__(self, unreader, length): + self.unreader = unreader + self.length = length + + def read(self, size): + if not isinstance(size, six.integer_types): + raise TypeError("size must be an integral type") + + size = min(self.length, size) + if size < 0: + raise ValueError("Size must be positive.") + if size == 0: + return b"" + + buf = six.BytesIO() + data = self.unreader.read() + while data: + buf.write(data) + if buf.tell() >= size: + break + data = self.unreader.read() + + buf = buf.getvalue() + ret, rest = buf[:size], buf[size:] + self.unreader.unread(rest) + self.length -= size + return ret + + +class EOFReader(object): + def __init__(self, unreader): + self.unreader = unreader + self.buf = six.BytesIO() + self.finished = False + + def read(self, size): + if not isinstance(size, six.integer_types): + raise TypeError("size must be an integral type") + if size < 0: + raise ValueError("Size must be positive.") + if size == 0: + return b"" + + if self.finished: + data = self.buf.getvalue() + ret, rest = data[:size], data[size:] 
+ self.buf = six.BytesIO() + self.buf.write(rest) + return ret + + data = self.unreader.read() + while data: + self.buf.write(data) + if self.buf.tell() > size: + break + data = self.unreader.read() + + if not data: + self.finished = True + + data = self.buf.getvalue() + ret, rest = data[:size], data[size:] + self.buf = six.BytesIO() + self.buf.write(rest) + return ret + + +class Body(object): + def __init__(self, reader): + self.reader = reader + self.buf = six.BytesIO() + + def __iter__(self): + return self + + def __next__(self): + ret = self.readline() + if not ret: + raise StopIteration() + return ret + next = __next__ + + def getsize(self, size): + if size is None: + return six.MAXSIZE + elif not isinstance(size, six.integer_types): + raise TypeError("size must be an integral type") + elif size < 0: + return six.MAXSIZE + return size + + def read(self, size=None): + size = self.getsize(size) + if size == 0: + return b"" + + if size < self.buf.tell(): + data = self.buf.getvalue() + ret, rest = data[:size], data[size:] + self.buf = six.BytesIO() + self.buf.write(rest) + return ret + + while size > self.buf.tell(): + data = self.reader.read(1024) + if not data: + break + self.buf.write(data) + + data = self.buf.getvalue() + ret, rest = data[:size], data[size:] + self.buf = six.BytesIO() + self.buf.write(rest) + return ret + + def readline(self, size=None): + size = self.getsize(size) + if size == 0: + return b"" + + data = self.buf.getvalue() + self.buf = six.BytesIO() + + ret = [] + while 1: + idx = data.find(b"\n", 0, size) + idx = idx + 1 if idx >= 0 else size if len(data) >= size else 0 + if idx: + ret.append(data[:idx]) + self.buf.write(data[idx:]) + break + + ret.append(data) + size -= len(data) + data = self.reader.read(min(1024, size)) + if not data: + break + + return b"".join(ret) + + def readlines(self, size=None): + ret = [] + data = self.read() + while data: + pos = data.find(b"\n") + if pos < 0: + ret.append(data) + data = b"" + else: + line, data = data[:pos + 1], data[pos + 1:] + ret.append(line) + return ret diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/errors.py b/venv/lib/python3.6/site-packages/gunicorn/http/errors.py new file mode 100644 index 0000000..7839ef0 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/http/errors.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +# We don't need to call super() in __init__ methods of our +# BaseException and Exception classes because we also define +# our own __str__ methods so there is no need to pass 'message' +# to the base class to get a meaningful output from 'str(exc)'. 
+# pylint: disable=super-init-not-called + + +class ParseException(Exception): + pass + + +class NoMoreData(IOError): + def __init__(self, buf=None): + self.buf = buf + + def __str__(self): + return "No more data after: %r" % self.buf + + +class InvalidRequestLine(ParseException): + def __init__(self, req): + self.req = req + self.code = 400 + + def __str__(self): + return "Invalid HTTP request line: %r" % self.req + + +class InvalidRequestMethod(ParseException): + def __init__(self, method): + self.method = method + + def __str__(self): + return "Invalid HTTP method: %r" % self.method + + +class InvalidHTTPVersion(ParseException): + def __init__(self, version): + self.version = version + + def __str__(self): + return "Invalid HTTP Version: %r" % self.version + + +class InvalidHeader(ParseException): + def __init__(self, hdr, req=None): + self.hdr = hdr + self.req = req + + def __str__(self): + return "Invalid HTTP Header: %r" % self.hdr + + +class InvalidHeaderName(ParseException): + def __init__(self, hdr): + self.hdr = hdr + + def __str__(self): + return "Invalid HTTP header name: %r" % self.hdr + + +class InvalidChunkSize(IOError): + def __init__(self, data): + self.data = data + + def __str__(self): + return "Invalid chunk size: %r" % self.data + + +class ChunkMissingTerminator(IOError): + def __init__(self, term): + self.term = term + + def __str__(self): + return "Invalid chunk terminator is not '\\r\\n': %r" % self.term + + +class LimitRequestLine(ParseException): + def __init__(self, size, max_size): + self.size = size + self.max_size = max_size + + def __str__(self): + return "Request Line is too large (%s > %s)" % (self.size, self.max_size) + + +class LimitRequestHeaders(ParseException): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + + +class InvalidProxyLine(ParseException): + def __init__(self, line): + self.line = line + self.code = 400 + + def __str__(self): + return "Invalid PROXY line: %r" % self.line + + +class ForbiddenProxyRequest(ParseException): + def __init__(self, host): + self.host = host + self.code = 403 + + def __str__(self): + return "Proxy request from %r not allowed" % self.host + + +class InvalidSchemeHeaders(ParseException): + def __str__(self): + return "Contradictory scheme headers" diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/message.py b/venv/lib/python3.6/site-packages/gunicorn/http/message.py new file mode 100644 index 0000000..2700b32 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/http/message.py @@ -0,0 +1,363 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. 
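The parser exceptions in http/errors.py above format themselves for the error log via __str__, and the request-level ones also carry an HTTP status in .code. For example (a small sketch)::

    from gunicorn.http.errors import InvalidRequestLine

    try:
        raise InvalidRequestLine("BAD LINE")
    except InvalidRequestLine as exc:
        print(exc.code)   # 400
        print(exc)        # Invalid HTTP request line: 'BAD LINE'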
+ +import re +import socket +from errno import ENOTCONN + +from gunicorn._compat import bytes_to_str +from gunicorn.http.unreader import SocketUnreader +from gunicorn.http.body import ChunkedReader, LengthReader, EOFReader, Body +from gunicorn.http.errors import (InvalidHeader, InvalidHeaderName, NoMoreData, + InvalidRequestLine, InvalidRequestMethod, InvalidHTTPVersion, + LimitRequestLine, LimitRequestHeaders) +from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest +from gunicorn.http.errors import InvalidSchemeHeaders +from gunicorn.six import BytesIO, string_types +from gunicorn.util import split_request_uri + +MAX_REQUEST_LINE = 8190 +MAX_HEADERS = 32768 +DEFAULT_MAX_HEADERFIELD_SIZE = 8190 + +HEADER_RE = re.compile(r"[\x00-\x1F\x7F()<>@,;:\[\]={} \t\\\"]") +METH_RE = re.compile(r"[A-Z0-9$-_.]{3,20}") +VERSION_RE = re.compile(r"HTTP/(\d+)\.(\d+)") + + +class Message(object): + def __init__(self, cfg, unreader): + self.cfg = cfg + self.unreader = unreader + self.version = None + self.headers = [] + self.trailers = [] + self.body = None + self.scheme = "https" if cfg.is_ssl else "http" + + # set headers limits + self.limit_request_fields = cfg.limit_request_fields + if (self.limit_request_fields <= 0 + or self.limit_request_fields > MAX_HEADERS): + self.limit_request_fields = MAX_HEADERS + self.limit_request_field_size = cfg.limit_request_field_size + if self.limit_request_field_size < 0: + self.limit_request_field_size = DEFAULT_MAX_HEADERFIELD_SIZE + + # set max header buffer size + max_header_field_size = self.limit_request_field_size or DEFAULT_MAX_HEADERFIELD_SIZE + self.max_buffer_headers = self.limit_request_fields * \ + (max_header_field_size + 2) + 4 + + unused = self.parse(self.unreader) + self.unreader.unread(unused) + self.set_body_reader() + + def parse(self, unreader): + raise NotImplementedError() + + def parse_headers(self, data): + cfg = self.cfg + headers = [] + + # Split lines on \r\n keeping the \r\n on each line + lines = [bytes_to_str(line) + "\r\n" for line in data.split(b"\r\n")] + + # handle scheme headers + scheme_header = False + secure_scheme_headers = {} + if '*' in cfg.forwarded_allow_ips: + secure_scheme_headers = cfg.secure_scheme_headers + elif isinstance(self.unreader, SocketUnreader): + remote_addr = self.unreader.sock.getpeername() + if isinstance(remote_addr, tuple): + remote_host = remote_addr[0] + if remote_host in cfg.forwarded_allow_ips: + secure_scheme_headers = cfg.secure_scheme_headers + elif isinstance(remote_addr, string_types): + secure_scheme_headers = cfg.secure_scheme_headers + + # Parse headers into key/value pairs paying attention + # to continuation lines. + while lines: + if len(headers) >= self.limit_request_fields: + raise LimitRequestHeaders("limit request headers fields") + + # Parse initial header name : value pair. 
+ curr = lines.pop(0) + header_length = len(curr) + if curr.find(":") < 0: + raise InvalidHeader(curr.strip()) + name, value = curr.split(":", 1) + name = name.rstrip(" \t").upper() + if HEADER_RE.search(name): + raise InvalidHeaderName(name) + + name, value = name.strip(), [value.lstrip()] + + # Consume value continuation lines + while lines and lines[0].startswith((" ", "\t")): + curr = lines.pop(0) + header_length += len(curr) + if header_length > self.limit_request_field_size > 0: + raise LimitRequestHeaders("limit request headers " + + "fields size") + value.append(curr) + value = ''.join(value).rstrip() + + if header_length > self.limit_request_field_size > 0: + raise LimitRequestHeaders("limit request headers fields size") + + if name in secure_scheme_headers: + secure = value == secure_scheme_headers[name] + scheme = "https" if secure else "http" + if scheme_header: + if scheme != self.scheme: + raise InvalidSchemeHeaders() + else: + scheme_header = True + self.scheme = scheme + + headers.append((name, value)) + + return headers + + def set_body_reader(self): + chunked = False + content_length = None + for (name, value) in self.headers: + if name == "CONTENT-LENGTH": + content_length = value + elif name == "TRANSFER-ENCODING": + chunked = value.lower() == "chunked" + elif name == "SEC-WEBSOCKET-KEY1": + content_length = 8 + + if chunked: + self.body = Body(ChunkedReader(self, self.unreader)) + elif content_length is not None: + try: + content_length = int(content_length) + except ValueError: + raise InvalidHeader("CONTENT-LENGTH", req=self) + + if content_length < 0: + raise InvalidHeader("CONTENT-LENGTH", req=self) + + self.body = Body(LengthReader(self.unreader, content_length)) + else: + self.body = Body(EOFReader(self.unreader)) + + def should_close(self): + for (h, v) in self.headers: + if h == "CONNECTION": + v = v.lower().strip() + if v == "close": + return True + elif v == "keep-alive": + return False + break + return self.version <= (1, 0) + + +class Request(Message): + def __init__(self, cfg, unreader, req_number=1): + self.method = None + self.uri = None + self.path = None + self.query = None + self.fragment = None + + # get max request line size + self.limit_request_line = cfg.limit_request_line + if (self.limit_request_line < 0 + or self.limit_request_line >= MAX_REQUEST_LINE): + self.limit_request_line = MAX_REQUEST_LINE + + self.req_number = req_number + self.proxy_protocol_info = None + super(Request, self).__init__(cfg, unreader) + + def get_data(self, unreader, buf, stop=False): + data = unreader.read() + if not data: + if stop: + raise StopIteration() + raise NoMoreData(buf.getvalue()) + buf.write(data) + + def parse(self, unreader): + buf = BytesIO() + self.get_data(unreader, buf, stop=True) + + # get request line + line, rbuf = self.read_line(unreader, buf, self.limit_request_line) + + # proxy protocol + if self.proxy_protocol(bytes_to_str(line)): + # get next request line + buf = BytesIO() + buf.write(rbuf) + line, rbuf = self.read_line(unreader, buf, self.limit_request_line) + + self.parse_request_line(line) + buf = BytesIO() + buf.write(rbuf) + + # Headers + data = buf.getvalue() + idx = data.find(b"\r\n\r\n") + + done = data[:2] == b"\r\n" + while True: + idx = data.find(b"\r\n\r\n") + done = data[:2] == b"\r\n" + + if idx < 0 and not done: + self.get_data(unreader, buf) + data = buf.getvalue() + if len(data) > self.max_buffer_headers: + raise LimitRequestHeaders("max buffer headers") + else: + break + + if done: + self.unreader.unread(data[2:]) + return 
b"" + + self.headers = self.parse_headers(data[:idx]) + + ret = data[idx + 4:] + buf = None + return ret + + def read_line(self, unreader, buf, limit=0): + data = buf.getvalue() + + while True: + idx = data.find(b"\r\n") + if idx >= 0: + # check if the request line is too large + if idx > limit > 0: + raise LimitRequestLine(idx, limit) + break + elif len(data) - 2 > limit > 0: + raise LimitRequestLine(len(data), limit) + self.get_data(unreader, buf) + data = buf.getvalue() + + return (data[:idx], # request line, + data[idx + 2:]) # residue in the buffer, skip \r\n + + def proxy_protocol(self, line): + """\ + Detect, check and parse proxy protocol. + + :raises: ForbiddenProxyRequest, InvalidProxyLine. + :return: True for proxy protocol line else False + """ + if not self.cfg.proxy_protocol: + return False + + if self.req_number != 1: + return False + + if not line.startswith("PROXY"): + return False + + self.proxy_protocol_access_check() + self.parse_proxy_protocol(line) + + return True + + def proxy_protocol_access_check(self): + # check in allow list + if isinstance(self.unreader, SocketUnreader): + try: + remote_host = self.unreader.sock.getpeername()[0] + except socket.error as e: + if e.args[0] == ENOTCONN: + raise ForbiddenProxyRequest("UNKNOW") + raise + if ("*" not in self.cfg.proxy_allow_ips and + remote_host not in self.cfg.proxy_allow_ips): + raise ForbiddenProxyRequest(remote_host) + + def parse_proxy_protocol(self, line): + bits = line.split() + + if len(bits) != 6: + raise InvalidProxyLine(line) + + # Extract data + proto = bits[1] + s_addr = bits[2] + d_addr = bits[3] + + # Validation + if proto not in ["TCP4", "TCP6"]: + raise InvalidProxyLine("protocol '%s' not supported" % proto) + if proto == "TCP4": + try: + socket.inet_pton(socket.AF_INET, s_addr) + socket.inet_pton(socket.AF_INET, d_addr) + except socket.error: + raise InvalidProxyLine(line) + elif proto == "TCP6": + try: + socket.inet_pton(socket.AF_INET6, s_addr) + socket.inet_pton(socket.AF_INET6, d_addr) + except socket.error: + raise InvalidProxyLine(line) + + try: + s_port = int(bits[4]) + d_port = int(bits[5]) + except ValueError: + raise InvalidProxyLine("invalid port %s" % line) + + if not ((0 <= s_port <= 65535) and (0 <= d_port <= 65535)): + raise InvalidProxyLine("invalid port %s" % line) + + # Set data + self.proxy_protocol_info = { + "proxy_protocol": proto, + "client_addr": s_addr, + "client_port": s_port, + "proxy_addr": d_addr, + "proxy_port": d_port + } + + def parse_request_line(self, line_bytes): + bits = [bytes_to_str(bit) for bit in line_bytes.split(None, 2)] + if len(bits) != 3: + raise InvalidRequestLine(bytes_to_str(line_bytes)) + + # Method + if not METH_RE.match(bits[0]): + raise InvalidRequestMethod(bits[0]) + self.method = bits[0].upper() + + # URI + self.uri = bits[1] + + try: + parts = split_request_uri(self.uri) + except ValueError: + raise InvalidRequestLine(bytes_to_str(line_bytes)) + self.path = parts.path or "" + self.query = parts.query or "" + self.fragment = parts.fragment or "" + + # Version + match = VERSION_RE.match(bits[2]) + if match is None: + raise InvalidHTTPVersion(bits[2]) + self.version = (int(match.group(1)), int(match.group(2))) + + def set_body_reader(self): + super(Request, self).set_body_reader() + if isinstance(self.body.reader, EOFReader): + self.body = Body(LengthReader(self.unreader, 0)) diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/parser.py b/venv/lib/python3.6/site-packages/gunicorn/http/parser.py new file mode 100644 index 0000000..a4a0f1e --- 
/dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/http/parser.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +from gunicorn.http.message import Request +from gunicorn.http.unreader import SocketUnreader, IterUnreader + + +class Parser(object): + + mesg_class = None + + def __init__(self, cfg, source): + self.cfg = cfg + if hasattr(source, "recv"): + self.unreader = SocketUnreader(source) + else: + self.unreader = IterUnreader(source) + self.mesg = None + + # request counter (for keepalive connetions) + self.req_count = 0 + + def __iter__(self): + return self + + def __next__(self): + # Stop if HTTP dictates a stop. + if self.mesg and self.mesg.should_close(): + raise StopIteration() + + # Discard any unread body of the previous message + if self.mesg: + data = self.mesg.body.read(8192) + while data: + data = self.mesg.body.read(8192) + + # Parse the next request + self.req_count += 1 + self.mesg = self.mesg_class(self.cfg, self.unreader, self.req_count) + if not self.mesg: + raise StopIteration() + return self.mesg + + next = __next__ + + +class RequestParser(Parser): + + mesg_class = Request diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/unreader.py b/venv/lib/python3.6/site-packages/gunicorn/http/unreader.py new file mode 100644 index 0000000..9f312a8 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/http/unreader.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +import os + +from gunicorn import six + +# Classes that can undo reading data from +# a given type of data source. + + +class Unreader(object): + def __init__(self): + self.buf = six.BytesIO() + + def chunk(self): + raise NotImplementedError() + + def read(self, size=None): + if size is not None and not isinstance(size, six.integer_types): + raise TypeError("size parameter must be an int or long.") + + if size is not None: + if size == 0: + return b"" + if size < 0: + size = None + + self.buf.seek(0, os.SEEK_END) + + if size is None and self.buf.tell(): + ret = self.buf.getvalue() + self.buf = six.BytesIO() + return ret + if size is None: + d = self.chunk() + return d + + while self.buf.tell() < size: + chunk = self.chunk() + if not chunk: + ret = self.buf.getvalue() + self.buf = six.BytesIO() + return ret + self.buf.write(chunk) + data = self.buf.getvalue() + self.buf = six.BytesIO() + self.buf.write(data[size:]) + return data[:size] + + def unread(self, data): + self.buf.seek(0, os.SEEK_END) + self.buf.write(data) + + +class SocketUnreader(Unreader): + def __init__(self, sock, max_chunk=8192): + super(SocketUnreader, self).__init__() + self.sock = sock + self.mxchunk = max_chunk + + def chunk(self): + return self.sock.recv(self.mxchunk) + + +class IterUnreader(Unreader): + def __init__(self, iterable): + super(IterUnreader, self).__init__() + self.iter = iter(iterable) + + def chunk(self): + if not self.iter: + return b"" + try: + return six.next(self.iter) + except StopIteration: + self.iter = None + return b"" diff --git a/venv/lib/python3.6/site-packages/gunicorn/http/wsgi.py b/venv/lib/python3.6/site-packages/gunicorn/http/wsgi.py new file mode 100644 index 0000000..ff75974 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/http/wsgi.py @@ -0,0 +1,411 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. 
+# See the NOTICE for more information. + +import io +import logging +import os +import re +import sys + +from gunicorn._compat import unquote_to_wsgi_str +from gunicorn.http.message import HEADER_RE +from gunicorn.http.errors import InvalidHeader, InvalidHeaderName +from gunicorn.six import string_types, binary_type, reraise +from gunicorn import SERVER_SOFTWARE +import gunicorn.util as util + +try: + # Python 3.3 has os.sendfile(). + from os import sendfile +except ImportError: + try: + from ._sendfile import sendfile + except ImportError: + sendfile = None + +# Send files in at most 1GB blocks as some operating systems can have problems +# with sending files in blocks over 2GB. +BLKSIZE = 0x3FFFFFFF + +HEADER_VALUE_RE = re.compile(r'[\x00-\x1F\x7F]') + +log = logging.getLogger(__name__) + + +class FileWrapper(object): + + def __init__(self, filelike, blksize=8192): + self.filelike = filelike + self.blksize = blksize + if hasattr(filelike, 'close'): + self.close = filelike.close + + def __getitem__(self, key): + data = self.filelike.read(self.blksize) + if data: + return data + raise IndexError + + +class WSGIErrorsWrapper(io.RawIOBase): + + def __init__(self, cfg): + # There is no public __init__ method for RawIOBase so + # we don't need to call super() in the __init__ method. + # pylint: disable=super-init-not-called + errorlog = logging.getLogger("gunicorn.error") + handlers = errorlog.handlers + self.streams = [] + + if cfg.errorlog == "-": + self.streams.append(sys.stderr) + handlers = handlers[1:] + + for h in handlers: + if hasattr(h, "stream"): + self.streams.append(h.stream) + + def write(self, data): + for stream in self.streams: + try: + stream.write(data) + except UnicodeError: + stream.write(data.encode("UTF-8")) + stream.flush() + + +def base_environ(cfg): + return { + "wsgi.errors": WSGIErrorsWrapper(cfg), + "wsgi.version": (1, 0), + "wsgi.multithread": False, + "wsgi.multiprocess": (cfg.workers > 1), + "wsgi.run_once": False, + "wsgi.file_wrapper": FileWrapper, + "SERVER_SOFTWARE": SERVER_SOFTWARE, + } + + +def default_environ(req, sock, cfg): + env = base_environ(cfg) + env.update({ + "wsgi.input": req.body, + "gunicorn.socket": sock, + "REQUEST_METHOD": req.method, + "QUERY_STRING": req.query, + "RAW_URI": req.uri, + "SERVER_PROTOCOL": "HTTP/%s" % ".".join([str(v) for v in req.version]) + }) + return env + + +def proxy_environ(req): + info = req.proxy_protocol_info + + if not info: + return {} + + return { + "PROXY_PROTOCOL": info["proxy_protocol"], + "REMOTE_ADDR": info["client_addr"], + "REMOTE_PORT": str(info["client_port"]), + "PROXY_ADDR": info["proxy_addr"], + "PROXY_PORT": str(info["proxy_port"]), + } + + +def create(req, sock, client, server, cfg): + resp = Response(req, sock, cfg) + + # set initial environ + environ = default_environ(req, sock, cfg) + + # default variables + host = None + script_name = os.environ.get("SCRIPT_NAME", "") + + # add the headers to the environ + for hdr_name, hdr_value in req.headers: + if hdr_name == "EXPECT": + # handle expect + if hdr_value.lower() == "100-continue": + sock.send(b"HTTP/1.1 100 Continue\r\n\r\n") + elif hdr_name == 'HOST': + host = hdr_value + elif hdr_name == "SCRIPT_NAME": + script_name = hdr_value + elif hdr_name == "CONTENT-TYPE": + environ['CONTENT_TYPE'] = hdr_value + continue + elif hdr_name == "CONTENT-LENGTH": + environ['CONTENT_LENGTH'] = hdr_value + continue + + key = 'HTTP_' + hdr_name.replace('-', '_') + if key in environ: + hdr_value = "%s,%s" % (environ[key], hdr_value) + environ[key] = hdr_value + 
+ # set the url scheme + environ['wsgi.url_scheme'] = req.scheme + + # set the REMOTE_* keys in environ + # authors should be aware that REMOTE_HOST and REMOTE_ADDR + # may not qualify the remote addr: + # http://www.ietf.org/rfc/rfc3875 + if isinstance(client, string_types): + environ['REMOTE_ADDR'] = client + elif isinstance(client, binary_type): + environ['REMOTE_ADDR'] = client.decode() + else: + environ['REMOTE_ADDR'] = client[0] + environ['REMOTE_PORT'] = str(client[1]) + + # handle the SERVER_* + # Normally only the application should use the Host header but since the + # WSGI spec doesn't support unix sockets, we are using it to create + # viable SERVER_* if possible. + if isinstance(server, string_types): + server = server.split(":") + if len(server) == 1: + # unix socket + if host: + server = host.split(':') + if len(server) == 1: + if req.scheme == "http": + server.append(80) + elif req.scheme == "https": + server.append(443) + else: + server.append('') + else: + # no host header given which means that we are not behind a + # proxy, so append an empty port. + server.append('') + environ['SERVER_NAME'] = server[0] + environ['SERVER_PORT'] = str(server[1]) + + # set the path and script name + path_info = req.path + if script_name: + path_info = path_info.split(script_name, 1)[1] + environ['PATH_INFO'] = unquote_to_wsgi_str(path_info) + environ['SCRIPT_NAME'] = script_name + + # override the environ with the correct remote and server address if + # we are behind a proxy using the proxy protocol. + environ.update(proxy_environ(req)) + return resp, environ + + +class Response(object): + + def __init__(self, req, sock, cfg): + self.req = req + self.sock = sock + self.version = SERVER_SOFTWARE + self.status = None + self.chunked = False + self.must_close = False + self.headers = [] + self.headers_sent = False + self.response_length = None + self.sent = 0 + self.upgrade = False + self.cfg = cfg + + def force_close(self): + self.must_close = True + + def should_close(self): + if self.must_close or self.req.should_close(): + return True + if self.response_length is not None or self.chunked: + return False + if self.req.method == 'HEAD': + return False + if self.status_code < 200 or self.status_code in (204, 304): + return False + return True + + def start_response(self, status, headers, exc_info=None): + if exc_info: + try: + if self.status and self.headers_sent: + reraise(exc_info[0], exc_info[1], exc_info[2]) + finally: + exc_info = None + elif self.status is not None: + raise AssertionError("Response headers already set!") + + self.status = status + + # get the status code from the response here so we can use it to check + # the need for the connection header later without parsing the string + # each time. 
+ try: + self.status_code = int(self.status.split()[0]) + except ValueError: + self.status_code = None + + self.process_headers(headers) + self.chunked = self.is_chunked() + return self.write + + def process_headers(self, headers): + for name, value in headers: + if not isinstance(name, string_types): + raise TypeError('%r is not a string' % name) + + if HEADER_RE.search(name): + raise InvalidHeaderName('%r' % name) + + if HEADER_VALUE_RE.search(value): + raise InvalidHeader('%r' % value) + + value = str(value).strip() + lname = name.lower().strip() + if lname == "content-length": + self.response_length = int(value) + elif util.is_hoppish(name): + if lname == "connection": + # handle websocket + if value.lower().strip() == "upgrade": + self.upgrade = True + elif lname == "upgrade": + if value.lower().strip() == "websocket": + self.headers.append((name.strip(), value)) + + # ignore hopbyhop headers + continue + self.headers.append((name.strip(), value)) + + def is_chunked(self): + # Only use chunked responses when the client is + # speaking HTTP/1.1 or newer and there was + # no Content-Length header set. + if self.response_length is not None: + return False + elif self.req.version <= (1, 0): + return False + elif self.req.method == 'HEAD': + # Responses to a HEAD request MUST NOT contain a response body. + return False + elif self.status_code in (204, 304): + # Do not use chunked responses when the response is guaranteed to + # not have a response body. + return False + return True + + def default_headers(self): + # set the connection header + if self.upgrade: + connection = "upgrade" + elif self.should_close(): + connection = "close" + else: + connection = "keep-alive" + + headers = [ + "HTTP/%s.%s %s\r\n" % (self.req.version[0], + self.req.version[1], self.status), + "Server: %s\r\n" % self.version, + "Date: %s\r\n" % util.http_date(), + "Connection: %s\r\n" % connection + ] + if self.chunked: + headers.append("Transfer-Encoding: chunked\r\n") + return headers + + def send_headers(self): + if self.headers_sent: + return + tosend = self.default_headers() + tosend.extend(["%s: %s\r\n" % (k, v) for k, v in self.headers]) + + header_str = "%s\r\n" % "".join(tosend) + util.write(self.sock, util.to_bytestring(header_str, "ascii")) + self.headers_sent = True + + def write(self, arg): + self.send_headers() + if not isinstance(arg, binary_type): + raise TypeError('%r is not a byte' % arg) + arglen = len(arg) + tosend = arglen + if self.response_length is not None: + if self.sent >= self.response_length: + # Never write more than self.response_length bytes + return + + tosend = min(self.response_length - self.sent, tosend) + if tosend < arglen: + arg = arg[:tosend] + + # Sending an empty chunk signals the end of the + # response and prematurely closes the response + if self.chunked and tosend == 0: + return + + self.sent += tosend + util.write(self.sock, arg, self.chunked) + + def can_sendfile(self): + return self.cfg.sendfile is not False and sendfile is not None + + def sendfile(self, respiter): + if self.cfg.is_ssl or not self.can_sendfile(): + return False + + if not util.has_fileno(respiter.filelike): + return False + + fileno = respiter.filelike.fileno() + try: + offset = os.lseek(fileno, 0, os.SEEK_CUR) + if self.response_length is None: + filesize = os.fstat(fileno).st_size + + # The file may be special and sendfile will fail. + # It may also be zero-length, but that is okay. 
+ if filesize == 0: + return False + + nbytes = filesize - offset + else: + nbytes = self.response_length + except (OSError, io.UnsupportedOperation): + return False + + self.send_headers() + + if self.is_chunked(): + chunk_size = "%X\r\n" % nbytes + self.sock.sendall(chunk_size.encode('utf-8')) + + sockno = self.sock.fileno() + sent = 0 + + while sent != nbytes: + count = min(nbytes - sent, BLKSIZE) + sent += sendfile(sockno, fileno, offset + sent, count) + + if self.is_chunked(): + self.sock.sendall(b"\r\n") + + os.lseek(fileno, offset, os.SEEK_SET) + + return True + + def write_file(self, respiter): + if not self.sendfile(respiter): + for item in respiter: + self.write(item) + + def close(self): + if not self.headers_sent: + self.send_headers() + if self.chunked: + util.write_chunk(self.sock, b"") diff --git a/venv/lib/python3.6/site-packages/gunicorn/instrument/__init__.py b/venv/lib/python3.6/site-packages/gunicorn/instrument/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python3.6/site-packages/gunicorn/instrument/__pycache__/__init__.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/instrument/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..59a52f9 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/instrument/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/instrument/__pycache__/statsd.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/instrument/__pycache__/statsd.cpython-36.pyc new file mode 100644 index 0000000..6c76bd1 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/instrument/__pycache__/statsd.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/instrument/statsd.py b/venv/lib/python3.6/site-packages/gunicorn/instrument/statsd.py new file mode 100644 index 0000000..4bbcb20 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/instrument/statsd.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. 
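When the Response above runs in chunked mode, every write() and the sendfile() path frame their payload as "<hex length>\r\n<data>\r\n", and close() emits the empty chunk that terminates the body. A tiny sketch of the framing (the helper name is made up)::

    def frame_chunk(data: bytes) -> bytes:
        # "<size-in-hex>\r\n<payload>\r\n"; the empty chunk b"0\r\n\r\n" ends the body
        return b"%X\r\n%s\r\n" % (len(data), data)

    print(frame_chunk(b"hello"))   # -> b'5\r\nhello\r\n'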
+ +"Bare-bones implementation of statsD's protocol, client-side" + +import socket +import logging +from re import sub + +from gunicorn.glogging import Logger +from gunicorn import six + +# Instrumentation constants +METRIC_VAR = "metric" +VALUE_VAR = "value" +MTYPE_VAR = "mtype" +GAUGE_TYPE = "gauge" +COUNTER_TYPE = "counter" +HISTOGRAM_TYPE = "histogram" + +class Statsd(Logger): + """statsD-based instrumentation, that passes as a logger + """ + def __init__(self, cfg): + """host, port: statsD server + """ + Logger.__init__(self, cfg) + self.prefix = sub(r"^(.+[^.]+)\.*$", "\\g<1>.", cfg.statsd_prefix) + try: + host, port = cfg.statsd_host + self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + self.sock.connect((host, int(port))) + except Exception: + self.sock = None + + # Log errors and warnings + def critical(self, msg, *args, **kwargs): + Logger.critical(self, msg, *args, **kwargs) + self.increment("gunicorn.log.critical", 1) + + def error(self, msg, *args, **kwargs): + Logger.error(self, msg, *args, **kwargs) + self.increment("gunicorn.log.error", 1) + + def warning(self, msg, *args, **kwargs): + Logger.warning(self, msg, *args, **kwargs) + self.increment("gunicorn.log.warning", 1) + + def exception(self, msg, *args, **kwargs): + Logger.exception(self, msg, *args, **kwargs) + self.increment("gunicorn.log.exception", 1) + + # Special treatement for info, the most common log level + def info(self, msg, *args, **kwargs): + self.log(logging.INFO, msg, *args, **kwargs) + + # skip the run-of-the-mill logs + def debug(self, msg, *args, **kwargs): + self.log(logging.DEBUG, msg, *args, **kwargs) + + def log(self, lvl, msg, *args, **kwargs): + """Log a given statistic if metric, value and type are present + """ + try: + extra = kwargs.get("extra", None) + if extra is not None: + metric = extra.get(METRIC_VAR, None) + value = extra.get(VALUE_VAR, None) + typ = extra.get(MTYPE_VAR, None) + if metric and value and typ: + if typ == GAUGE_TYPE: + self.gauge(metric, value) + elif typ == COUNTER_TYPE: + self.increment(metric, value) + elif typ == HISTOGRAM_TYPE: + self.histogram(metric, value) + else: + pass + + # Log to parent logger only if there is something to say + if msg: + Logger.log(self, lvl, msg, *args, **kwargs) + except Exception: + Logger.warning(self, "Failed to log to statsd", exc_info=True) + + # access logging + def access(self, resp, req, environ, request_time): + """Measure request duration + request_time is a datetime.timedelta + """ + Logger.access(self, resp, req, environ, request_time) + duration_in_ms = request_time.seconds * 1000 + float(request_time.microseconds) / 10 ** 3 + status = resp.status + if isinstance(status, str): + status = int(status.split(None, 1)[0]) + self.histogram("gunicorn.request.duration", duration_in_ms) + self.increment("gunicorn.requests", 1) + self.increment("gunicorn.request.status.%d" % status, 1) + + # statsD methods + # you can use those directly if you want + def gauge(self, name, value): + self._sock_send("{0}{1}:{2}|g".format(self.prefix, name, value)) + + def increment(self, name, value, sampling_rate=1.0): + self._sock_send("{0}{1}:{2}|c|@{3}".format(self.prefix, name, value, sampling_rate)) + + def decrement(self, name, value, sampling_rate=1.0): + self._sock_send("{0}{1}:-{2}|c|@{3}".format(self.prefix, name, value, sampling_rate)) + + def histogram(self, name, value): + self._sock_send("{0}{1}:{2}|ms".format(self.prefix, name, value)) + + def _sock_send(self, msg): + try: + if isinstance(msg, six.text_type): + msg = 
msg.encode("ascii") + if self.sock: + self.sock.send(msg) + except Exception: + Logger.warning(self, "Error sending message to statsd", exc_info=True) diff --git a/venv/lib/python3.6/site-packages/gunicorn/pidfile.py b/venv/lib/python3.6/site-packages/gunicorn/pidfile.py new file mode 100644 index 0000000..a6e085f --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/pidfile.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +import errno +import os +import tempfile + + +class Pidfile(object): + """\ + Manage a PID file. If a specific name is provided + it and '"%s.oldpid" % name' will be used. Otherwise + we create a temp file using os.mkstemp. + """ + + def __init__(self, fname): + self.fname = fname + self.pid = None + + def create(self, pid): + oldpid = self.validate() + if oldpid: + if oldpid == os.getpid(): + return + msg = "Already running on PID %s (or pid file '%s' is stale)" + raise RuntimeError(msg % (oldpid, self.fname)) + + self.pid = pid + + # Write pidfile + fdir = os.path.dirname(self.fname) + if fdir and not os.path.isdir(fdir): + raise RuntimeError("%s doesn't exist. Can't create pidfile." % fdir) + fd, fname = tempfile.mkstemp(dir=fdir) + os.write(fd, ("%s\n" % self.pid).encode('utf-8')) + if self.fname: + os.rename(fname, self.fname) + else: + self.fname = fname + os.close(fd) + + # set permissions to -rw-r--r-- + os.chmod(self.fname, 420) + + def rename(self, path): + self.unlink() + self.fname = path + self.create(self.pid) + + def unlink(self): + """ delete pidfile""" + try: + with open(self.fname, "r") as f: + pid1 = int(f.read() or 0) + + if pid1 == self.pid: + os.unlink(self.fname) + except: + pass + + def validate(self): + """ Validate pidfile and make it stale if needed""" + if not self.fname: + return + try: + with open(self.fname, "r") as f: + try: + wpid = int(f.read()) + except ValueError: + return + + try: + os.kill(wpid, 0) + return wpid + except OSError as e: + if e.args[0] == errno.EPERM: + return wpid + if e.args[0] == errno.ESRCH: + return + raise + except IOError as e: + if e.args[0] == errno.ENOENT: + return + raise diff --git a/venv/lib/python3.6/site-packages/gunicorn/reloader.py b/venv/lib/python3.6/site-packages/gunicorn/reloader.py new file mode 100644 index 0000000..c879885 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/reloader.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. 
+ +import os +import os.path +import re +import sys +import time +import threading + +COMPILED_EXT_RE = re.compile(r'py[co]$') + + +class Reloader(threading.Thread): + def __init__(self, extra_files=None, interval=1, callback=None): + super(Reloader, self).__init__() + self.setDaemon(True) + self._extra_files = set(extra_files or ()) + self._extra_files_lock = threading.RLock() + self._interval = interval + self._callback = callback + + def add_extra_file(self, filename): + with self._extra_files_lock: + self._extra_files.add(filename) + + def get_files(self): + fnames = [ + COMPILED_EXT_RE.sub('py', module.__file__) + for module in tuple(sys.modules.values()) + if getattr(module, '__file__', None) + ] + + with self._extra_files_lock: + fnames.extend(self._extra_files) + + return fnames + + def run(self): + mtimes = {} + while True: + for filename in self.get_files(): + try: + mtime = os.stat(filename).st_mtime + except OSError: + continue + old_time = mtimes.get(filename) + if old_time is None: + mtimes[filename] = mtime + continue + elif mtime > old_time: + if self._callback: + self._callback(filename) + time.sleep(self._interval) + +has_inotify = False +if sys.platform.startswith('linux'): + try: + from inotify.adapters import Inotify + import inotify.constants + has_inotify = True + except ImportError: + pass + + +if has_inotify: + + class InotifyReloader(threading.Thread): + event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE + | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY + | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM + | inotify.constants.IN_MOVED_TO) + + def __init__(self, extra_files=None, callback=None): + super(InotifyReloader, self).__init__() + self.setDaemon(True) + self._callback = callback + self._dirs = set() + self._watcher = Inotify() + + for extra_file in extra_files: + self.add_extra_file(extra_file) + + def add_extra_file(self, filename): + dirname = os.path.dirname(filename) + + if dirname in self._dirs: + return + + self._watcher.add_watch(dirname, mask=self.event_mask) + self._dirs.add(dirname) + + def get_dirs(self): + fnames = [ + os.path.dirname(COMPILED_EXT_RE.sub('py', module.__file__)) + for module in tuple(sys.modules.values()) + if hasattr(module, '__file__') + ] + + return set(fnames) + + def run(self): + self._dirs = self.get_dirs() + + for dirname in self._dirs: + self._watcher.add_watch(dirname, mask=self.event_mask) + + for event in self._watcher.event_gen(): + if event is None: + continue + + filename = event[3] + + self._callback(filename) + +else: + + class InotifyReloader(object): + def __init__(self, callback=None): + raise ImportError('You must have the inotify module installed to ' + 'use the inotify reloader') + + +preferred_reloader = InotifyReloader if has_inotify else Reloader + +reloader_engines = { + 'auto': preferred_reloader, + 'poll': Reloader, + 'inotify': InotifyReloader, +} diff --git a/venv/lib/python3.6/site-packages/gunicorn/selectors.py b/venv/lib/python3.6/site-packages/gunicorn/selectors.py new file mode 100644 index 0000000..cdae569 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/selectors.py @@ -0,0 +1,592 @@ +"""Selectors module. + +This module allows high-level and efficient I/O multiplexing, built upon the +`select` module primitives. + +The following code adapted from trollius.selectors. 
+""" + + +from abc import ABCMeta, abstractmethod +from collections import namedtuple, Mapping +import math +import select +import sys + +from gunicorn._compat import wrap_error, InterruptedError +from gunicorn import six + + +# generic events, that must be mapped to implementation-specific ones +EVENT_READ = (1 << 0) +EVENT_WRITE = (1 << 1) + + +def _fileobj_to_fd(fileobj): + """Return a file descriptor from a file object. + + Parameters: + fileobj -- file object or file descriptor + + Returns: + corresponding file descriptor + + Raises: + ValueError if the object is invalid + """ + if isinstance(fileobj, six.integer_types): + fd = fileobj + else: + try: + fd = int(fileobj.fileno()) + except (AttributeError, TypeError, ValueError): + raise ValueError("Invalid file object: " + "{0!r}".format(fileobj)) + if fd < 0: + raise ValueError("Invalid file descriptor: {0}".format(fd)) + return fd + + +SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data']) +"""Object used to associate a file object to its backing file descriptor, +selected event mask and attached data.""" + + +class _SelectorMapping(Mapping): + """Mapping of file objects to selector keys.""" + + def __init__(self, selector): + self._selector = selector + + def __len__(self): + return len(self._selector._fd_to_key) + + def __getitem__(self, fileobj): + try: + fd = self._selector._fileobj_lookup(fileobj) + return self._selector._fd_to_key[fd] + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + + def __iter__(self): + return iter(self._selector._fd_to_key) + + +class BaseSelector(six.with_metaclass(ABCMeta)): + """Selector abstract base class. + + A selector supports registering file objects to be monitored for specific + I/O events. + + A file object is a file descriptor or any object with a `fileno()` method. + An arbitrary object can be attached to the file object, which can be used + for example to store context information, a callback, etc. + + A selector can use various implementations (select(), poll(), epoll()...) + depending on the platform. The default `Selector` class uses the most + efficient implementation on the current platform. + """ + + @abstractmethod + def register(self, fileobj, events, data=None): + """Register a file object. + + Parameters: + fileobj -- file object or file descriptor + events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) + data -- attached data + + Returns: + SelectorKey instance + + Raises: + ValueError if events is invalid + KeyError if fileobj is already registered + OSError if fileobj is closed or otherwise is unacceptable to + the underlying system call (if a system call is made) + + Note: + OSError may or may not be raised + """ + raise NotImplementedError + + @abstractmethod + def unregister(self, fileobj): + """Unregister a file object. + + Parameters: + fileobj -- file object or file descriptor + + Returns: + SelectorKey instance + + Raises: + KeyError if fileobj is not registered + + Note: + If fileobj is registered but has since been closed this does + *not* raise OSError (even if the wrapped syscall does) + """ + raise NotImplementedError + + def modify(self, fileobj, events, data=None): + """Change a registered file object monitored events or attached data. 
+ + Parameters: + fileobj -- file object or file descriptor + events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) + data -- attached data + + Returns: + SelectorKey instance + + Raises: + Anything that unregister() or register() raises + """ + self.unregister(fileobj) + return self.register(fileobj, events, data) + + @abstractmethod + def select(self, timeout=None): + """Perform the actual selection, until some monitored file objects are + ready or a timeout expires. + + Parameters: + timeout -- if timeout > 0, this specifies the maximum wait time, in + seconds + if timeout <= 0, the select() call won't block, and will + report the currently ready file objects + if timeout is None, select() will block until a monitored + file object becomes ready + + Returns: + list of (key, events) for ready file objects + `events` is a bitwise mask of EVENT_READ|EVENT_WRITE + """ + raise NotImplementedError + + def close(self): + """Close the selector. + + This must be called to make sure that any underlying resource is freed. + """ + pass + + def get_key(self, fileobj): + """Return the key associated to a registered file object. + + Returns: + SelectorKey for this file object + """ + mapping = self.get_map() + try: + return mapping[fileobj] + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + + @abstractmethod + def get_map(self): + """Return a mapping of file objects to selector keys.""" + raise NotImplementedError + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + +class _BaseSelectorImpl(BaseSelector): + """Base selector implementation.""" + + def __init__(self): + # this maps file descriptors to keys + self._fd_to_key = {} + # read-only mapping returned by get_map() + self._map = _SelectorMapping(self) + + def _fileobj_lookup(self, fileobj): + """Return a file descriptor from a file object. + + This wraps _fileobj_to_fd() to do an exhaustive search in case + the object is invalid but we still have it in our map. This + is used by unregister() so we can unregister an object that + was previously registered even if it is closed. It is also + used by _SelectorMapping. + """ + try: + return _fileobj_to_fd(fileobj) + except ValueError: + # Do an exhaustive search. + for key in self._fd_to_key.values(): + if key.fileobj is fileobj: + return key.fd + # Raise ValueError after all. + raise + + def register(self, fileobj, events, data=None): + if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): + raise ValueError("Invalid events: {0!r}".format(events)) + + key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) + + if key.fd in self._fd_to_key: + raise KeyError("{0!r} (FD {1}) is already registered" + .format(fileobj, key.fd)) + + self._fd_to_key[key.fd] = key + return key + + def unregister(self, fileobj): + try: + key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + return key + + def modify(self, fileobj, events, data=None): + # TODO: Subclasses can probably optimize this even further. + try: + key = self._fd_to_key[self._fileobj_lookup(fileobj)] + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + if events != key.events: + self.unregister(fileobj) + key = self.register(fileobj, events, data) + elif data != key.data: + # Use a shortcut to update the data. 
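+            # namedtuple._replace returns a copy with just the data field
+            # swapped, so the stored SelectorKey stays effectively immutable.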
+ key = key._replace(data=data) + self._fd_to_key[key.fd] = key + return key + + def close(self): + self._fd_to_key.clear() + + def get_map(self): + return self._map + + def _key_from_fd(self, fd): + """Return the key associated to a given file descriptor. + + Parameters: + fd -- file descriptor + + Returns: + corresponding key, or None if not found + """ + try: + return self._fd_to_key[fd] + except KeyError: + return None + + +class SelectSelector(_BaseSelectorImpl): + """Select-based selector.""" + + def __init__(self): + super(SelectSelector, self).__init__() + self._readers = set() + self._writers = set() + + def register(self, fileobj, events, data=None): + key = super(SelectSelector, self).register(fileobj, events, data) + if events & EVENT_READ: + self._readers.add(key.fd) + if events & EVENT_WRITE: + self._writers.add(key.fd) + return key + + def unregister(self, fileobj): + key = super(SelectSelector, self).unregister(fileobj) + self._readers.discard(key.fd) + self._writers.discard(key.fd) + return key + + if sys.platform == 'win32': + def _select(self, r, w, _, timeout=None): + r, w, x = select.select(r, w, w, timeout) + return r, w + x, [] + else: + _select = select.select + + def select(self, timeout=None): + timeout = None if timeout is None else max(timeout, 0) + ready = [] + try: + r, w, _ = wrap_error(self._select, + self._readers, self._writers, [], timeout) + except InterruptedError: + return ready + r = set(r) + w = set(w) + for fd in r | w: + events = 0 + if fd in r: + events |= EVENT_READ + if fd in w: + events |= EVENT_WRITE + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + +if hasattr(select, 'poll'): + + class PollSelector(_BaseSelectorImpl): + """Poll-based selector.""" + + def __init__(self): + super(PollSelector, self).__init__() + self._poll = select.poll() + + def register(self, fileobj, events, data=None): + key = super(PollSelector, self).register(fileobj, events, data) + poll_events = 0 + if events & EVENT_READ: + poll_events |= select.POLLIN + if events & EVENT_WRITE: + poll_events |= select.POLLOUT + self._poll.register(key.fd, poll_events) + return key + + def unregister(self, fileobj): + key = super(PollSelector, self).unregister(fileobj) + self._poll.unregister(key.fd) + return key + + def select(self, timeout=None): + if timeout is None: + timeout = None + elif timeout <= 0: + timeout = 0 + else: + # poll() has a resolution of 1 millisecond, round away from + # zero to wait *at least* timeout seconds. 
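+                # e.g. a 0.4 ms timeout becomes 1 ms here rather than
+                # truncating to 0 and busy-polling.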
+ timeout = int(math.ceil(timeout * 1e3)) + ready = [] + try: + fd_event_list = wrap_error(self._poll.poll, timeout) + except InterruptedError: + return ready + for fd, event in fd_event_list: + events = 0 + if event & ~select.POLLIN: + events |= EVENT_WRITE + if event & ~select.POLLOUT: + events |= EVENT_READ + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + +if hasattr(select, 'epoll'): + + class EpollSelector(_BaseSelectorImpl): + """Epoll-based selector.""" + + def __init__(self): + super(EpollSelector, self).__init__() + self._epoll = select.epoll() + + def fileno(self): + return self._epoll.fileno() + + def register(self, fileobj, events, data=None): + key = super(EpollSelector, self).register(fileobj, events, data) + epoll_events = 0 + if events & EVENT_READ: + epoll_events |= select.EPOLLIN + if events & EVENT_WRITE: + epoll_events |= select.EPOLLOUT + self._epoll.register(key.fd, epoll_events) + return key + + def unregister(self, fileobj): + key = super(EpollSelector, self).unregister(fileobj) + try: + self._epoll.unregister(key.fd) + except OSError: + # This can happen if the FD was closed since it + # was registered. + pass + return key + + def select(self, timeout=None): + if timeout is None: + timeout = -1 + elif timeout <= 0: + timeout = 0 + else: + # epoll_wait() has a resolution of 1 millisecond, round away + # from zero to wait *at least* timeout seconds. + timeout = math.ceil(timeout * 1e3) * 1e-3 + max_ev = len(self._fd_to_key) + ready = [] + try: + fd_event_list = wrap_error(self._epoll.poll, timeout, max_ev) + except InterruptedError: + return ready + for fd, event in fd_event_list: + events = 0 + if event & ~select.EPOLLIN: + events |= EVENT_WRITE + if event & ~select.EPOLLOUT: + events |= EVENT_READ + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + def close(self): + self._epoll.close() + super(EpollSelector, self).close() + + +if hasattr(select, 'devpoll'): + + class DevpollSelector(_BaseSelectorImpl): + """Solaris /dev/poll selector.""" + + def __init__(self): + super(DevpollSelector, self).__init__() + self._devpoll = select.devpoll() + + def fileno(self): + return self._devpoll.fileno() + + def register(self, fileobj, events, data=None): + key = super(DevpollSelector, self).register(fileobj, events, data) + poll_events = 0 + if events & EVENT_READ: + poll_events |= select.POLLIN + if events & EVENT_WRITE: + poll_events |= select.POLLOUT + self._devpoll.register(key.fd, poll_events) + return key + + def unregister(self, fileobj): + key = super(DevpollSelector, self).unregister(fileobj) + self._devpoll.unregister(key.fd) + return key + + def select(self, timeout=None): + if timeout is None: + timeout = None + elif timeout <= 0: + timeout = 0 + else: + # devpoll() has a resolution of 1 millisecond, round away from + # zero to wait *at least* timeout seconds. 
+ timeout = math.ceil(timeout * 1e3) + ready = [] + try: + fd_event_list = self._devpoll.poll(timeout) + except InterruptedError: + return ready + for fd, event in fd_event_list: + events = 0 + if event & ~select.POLLIN: + events |= EVENT_WRITE + if event & ~select.POLLOUT: + events |= EVENT_READ + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + def close(self): + self._devpoll.close() + super(DevpollSelector, self).close() + + +if hasattr(select, 'kqueue'): + + class KqueueSelector(_BaseSelectorImpl): + """Kqueue-based selector.""" + + def __init__(self): + super(KqueueSelector, self).__init__() + self._kqueue = select.kqueue() + + def fileno(self): + return self._kqueue.fileno() + + def register(self, fileobj, events, data=None): + key = super(KqueueSelector, self).register(fileobj, events, data) + if events & EVENT_READ: + kev = select.kevent(key.fd, select.KQ_FILTER_READ, + select.KQ_EV_ADD) + self._kqueue.control([kev], 0, 0) + if events & EVENT_WRITE: + kev = select.kevent(key.fd, select.KQ_FILTER_WRITE, + select.KQ_EV_ADD) + self._kqueue.control([kev], 0, 0) + return key + + def unregister(self, fileobj): + key = super(KqueueSelector, self).unregister(fileobj) + if key.events & EVENT_READ: + kev = select.kevent(key.fd, select.KQ_FILTER_READ, + select.KQ_EV_DELETE) + try: + self._kqueue.control([kev], 0, 0) + except OSError: + # This can happen if the FD was closed since it + # was registered. + pass + if key.events & EVENT_WRITE: + kev = select.kevent(key.fd, select.KQ_FILTER_WRITE, + select.KQ_EV_DELETE) + try: + self._kqueue.control([kev], 0, 0) + except OSError: + # See comment above. + pass + return key + + def select(self, timeout=None): + timeout = None if timeout is None else max(timeout, 0) + max_ev = len(self._fd_to_key) + ready = [] + try: + kev_list = wrap_error(self._kqueue.control, + None, max_ev, timeout) + except InterruptedError: + return ready + for kev in kev_list: + fd = kev.ident + flag = kev.filter + events = 0 + if flag == select.KQ_FILTER_READ: + events |= EVENT_READ + if flag == select.KQ_FILTER_WRITE: + events |= EVENT_WRITE + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + def close(self): + self._kqueue.close() + super(KqueueSelector, self).close() + + +# Choose the best implementation: roughly, epoll|kqueue|devpoll > poll > select. 
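+# The kernel-queue based selectors (epoll, kqueue, devpoll) only report ready
+# file descriptors, while poll() and select() rescan every registered fd on
+# each call.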
+# select() also can't accept a FD > FD_SETSIZE (usually around 1024) +if 'KqueueSelector' in globals(): + DefaultSelector = KqueueSelector +elif 'EpollSelector' in globals(): + DefaultSelector = EpollSelector +elif 'DevpollSelector' in globals(): + DefaultSelector = DevpollSelector +elif 'PollSelector' in globals(): + DefaultSelector = PollSelector +else: + DefaultSelector = SelectSelector diff --git a/venv/lib/python3.6/site-packages/gunicorn/six.py b/venv/lib/python3.6/site-packages/gunicorn/six.py new file mode 100644 index 0000000..21b0e80 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/six.py @@ -0,0 +1,762 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2014 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from __future__ import absolute_import + +import functools +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.8.0" + + +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + # This is a bit ugly, but it avoids running this again. 
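+        # Once the class-level descriptor is gone, later lookups hit the
+        # instance attribute set above, so _resolve() runs at most once.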
+ delattr(obj.__class__, self.name) + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." + fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. 
+ + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", 
"tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), + MovedModule("winreg", "_winreg"), +] +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." + attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + 
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + 
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) +else: + def iterkeys(d, **kw): + return iter(d.iterkeys(**kw)) + + def 
itervalues(d, **kw): + return iter(d.itervalues(**kw)) + + def iteritems(d, **kw): + return iter(d.iteritems(**kw)) + + def iterlists(d, **kw): + return iter(d.iterlists(**kw)) + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + def u(s): + return s + unichr = chr + if sys.version_info[1] <= 1: + def int2byte(i): + return bytes((i,)) + else: + # This is about 2x faster than the implementation above on 3.2+ + int2byte = operator.methodcaller("to_bytes", 1, "big") + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO +else: + def b(s): + return s + # Workaround for standalone backslash + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + def byte2int(bs): + return ord(bs[0]) + def indexbytes(buf, i): + return ord(buf[i]) + def iterbytes(buf): + return (ord(byte) for byte in buf) + import StringIO + StringIO = BytesIO = StringIO.StringIO +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + + def reraise(tp, value, tb=None): + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. 
+ if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(meta): + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. 
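+# From here on, imports such as `from gunicorn.six.moves import urllib_parse`
+# are answered by _importer, and the real module is only loaded on first use.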
+sys.meta_path.append(_importer) diff --git a/venv/lib/python3.6/site-packages/gunicorn/sock.py b/venv/lib/python3.6/site-packages/gunicorn/sock.py new file mode 100644 index 0000000..8870936 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/sock.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +import errno +import os +import socket +import stat +import sys +import time + +from gunicorn import util +from gunicorn.six import string_types + + +class BaseSocket(object): + + def __init__(self, address, conf, log, fd=None): + self.log = log + self.conf = conf + + self.cfg_addr = address + if fd is None: + sock = socket.socket(self.FAMILY, socket.SOCK_STREAM) + bound = False + else: + sock = socket.fromfd(fd, self.FAMILY, socket.SOCK_STREAM) + os.close(fd) + bound = True + + self.sock = self.set_options(sock, bound=bound) + + def __str__(self): + return "" % self.sock.fileno() + + def __getattr__(self, name): + return getattr(self.sock, name) + + def set_options(self, sock, bound=False): + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + if (self.conf.reuse_port + and hasattr(socket, 'SO_REUSEPORT')): # pragma: no cover + try: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + except socket.error as err: + if err.errno not in (errno.ENOPROTOOPT, errno.EINVAL): + raise + if not bound: + self.bind(sock) + sock.setblocking(0) + + # make sure that the socket can be inherited + if hasattr(sock, "set_inheritable"): + sock.set_inheritable(True) + + sock.listen(self.conf.backlog) + return sock + + def bind(self, sock): + sock.bind(self.cfg_addr) + + def close(self): + if self.sock is None: + return + + try: + self.sock.close() + except socket.error as e: + self.log.info("Error while closing socket %s", str(e)) + + self.sock = None + + +class TCPSocket(BaseSocket): + + FAMILY = socket.AF_INET + + def __str__(self): + if self.conf.is_ssl: + scheme = "https" + else: + scheme = "http" + + addr = self.sock.getsockname() + return "%s://%s:%d" % (scheme, addr[0], addr[1]) + + def set_options(self, sock, bound=False): + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + return super(TCPSocket, self).set_options(sock, bound=bound) + + +class TCP6Socket(TCPSocket): + + FAMILY = socket.AF_INET6 + + def __str__(self): + (host, port, _, _) = self.sock.getsockname() + return "http://[%s]:%d" % (host, port) + + +class UnixSocket(BaseSocket): + + FAMILY = socket.AF_UNIX + + def __init__(self, addr, conf, log, fd=None): + if fd is None: + try: + st = os.stat(addr) + except OSError as e: + if e.args[0] != errno.ENOENT: + raise + else: + if stat.S_ISSOCK(st.st_mode): + os.remove(addr) + else: + raise ValueError("%r is not a socket" % addr) + super(UnixSocket, self).__init__(addr, conf, log, fd=fd) + + def __str__(self): + return "unix:%s" % self.cfg_addr + + def bind(self, sock): + old_umask = os.umask(self.conf.umask) + sock.bind(self.cfg_addr) + util.chown(self.cfg_addr, self.conf.uid, self.conf.gid) + os.umask(old_umask) + + +def _sock_type(addr): + if isinstance(addr, tuple): + if util.is_ipv6(addr[0]): + sock_type = TCP6Socket + else: + sock_type = TCPSocket + elif isinstance(addr, string_types): + sock_type = UnixSocket + else: + raise TypeError("Unable to create socket from: %r" % addr) + return sock_type + + +def create_sockets(conf, log, fds=None): + """ + Create a new socket for the configured addresses or file descriptors. 
+ + If a configured address is a tuple then a TCP socket is created. + If it is a string, a Unix socket is created. Otherwise, a TypeError is + raised. + """ + listeners = [] + + # get it only once + laddr = conf.address + + # check ssl config early to raise the error on startup + # only the certfile is needed since it can contains the keyfile + if conf.certfile and not os.path.exists(conf.certfile): + raise ValueError('certfile "%s" does not exist' % conf.certfile) + + if conf.keyfile and not os.path.exists(conf.keyfile): + raise ValueError('keyfile "%s" does not exist' % conf.keyfile) + + # sockets are already bound + if fds is not None: + for fd in fds: + sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) + sock_name = sock.getsockname() + sock_type = _sock_type(sock_name) + listener = sock_type(sock_name, conf, log, fd=fd) + listeners.append(listener) + + return listeners + + # no sockets is bound, first initialization of gunicorn in this env. + for addr in laddr: + sock_type = _sock_type(addr) + sock = None + for i in range(5): + try: + sock = sock_type(addr, conf, log) + except socket.error as e: + if e.args[0] == errno.EADDRINUSE: + log.error("Connection in use: %s", str(addr)) + if e.args[0] == errno.EADDRNOTAVAIL: + log.error("Invalid address: %s", str(addr)) + if i < 5: + msg = "connection to {addr} failed: {error}" + log.debug(msg.format(addr=str(addr), error=str(e))) + log.error("Retrying in 1 second.") + time.sleep(1) + else: + break + + if sock is None: + log.error("Can't connect to %s", str(addr)) + sys.exit(1) + + listeners.append(sock) + + return listeners + + +def close_sockets(listeners, unlink=True): + for sock in listeners: + sock_name = sock.getsockname() + sock.close() + if unlink and _sock_type(sock_name) is UnixSocket: + os.unlink(sock_name) diff --git a/venv/lib/python3.6/site-packages/gunicorn/systemd.py b/venv/lib/python3.6/site-packages/gunicorn/systemd.py new file mode 100644 index 0000000..10ffb8d --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/systemd.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +import os + +SD_LISTEN_FDS_START = 3 + + +def listen_fds(unset_environment=True): + """ + Get the number of sockets inherited from systemd socket activation. + + :param unset_environment: clear systemd environment variables unless False + :type unset_environment: bool + :return: the number of sockets to inherit from systemd socket activation + :rtype: int + + Returns zero immediately if $LISTEN_PID is not set to the current pid. + Otherwise, returns the number of systemd activation sockets specified by + $LISTEN_FDS. + + When $LISTEN_PID matches the current pid, unsets the environment variables + unless the ``unset_environment`` flag is ``False``. + + .. note:: + Unlike the sd_listen_fds C function, this implementation does not set + the FD_CLOEXEC flag because the gunicorn arbiter never needs to do this. + + .. 
seealso:: + ``_ + + """ + fds = int(os.environ.get('LISTEN_FDS', 0)) + listen_pid = int(os.environ.get('LISTEN_PID', 0)) + + if listen_pid != os.getpid(): + return 0 + + if unset_environment: + os.environ.pop('LISTEN_PID', None) + os.environ.pop('LISTEN_FDS', None) + + return fds diff --git a/venv/lib/python3.6/site-packages/gunicorn/util.py b/venv/lib/python3.6/site-packages/gunicorn/util.py new file mode 100644 index 0000000..84f6937 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/util.py @@ -0,0 +1,557 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +from __future__ import print_function + +import email.utils +import fcntl +import io +import os +import pkg_resources +import pwd +import random +import socket +import sys +import textwrap +import time +import traceback +import inspect +import errno +import warnings +import logging +import re + +from gunicorn import _compat +from gunicorn.errors import AppImportError +from gunicorn.six import text_type +from gunicorn.workers import SUPPORTED_WORKERS + +REDIRECT_TO = getattr(os, 'devnull', '/dev/null') + +# Server and Date aren't technically hop-by-hop +# headers, but they are in the purview of the +# origin server which the WSGI spec says we should +# act like. So we drop them and add our own. +# +# In the future, concatenation server header values +# might be better, but nothing else does it and +# dropping them is easier. +hop_headers = set(""" + connection keep-alive proxy-authenticate proxy-authorization + te trailers transfer-encoding upgrade + server date + """.split()) + +try: + from setproctitle import setproctitle + + def _setproctitle(title): + setproctitle("gunicorn: %s" % title) +except ImportError: + def _setproctitle(title): + return + + +try: + from importlib import import_module +except ImportError: + def _resolve_name(name, package, level): + """Return the absolute name of the module to be imported.""" + if not hasattr(package, 'rindex'): + raise ValueError("'package' not set to a string") + dot = len(package) + for _ in range(level, 1, -1): + try: + dot = package.rindex('.', 0, dot) + except ValueError: + msg = "attempted relative import beyond top-level package" + raise ValueError(msg) + return "%s.%s" % (package[:dot], name) + + def import_module(name, package=None): + """Import a module. + +The 'package' argument is required when performing a relative import. It +specifies the package to use as the anchor point from which to resolve the +relative import to an absolute import. 
+ +""" + if name.startswith('.'): + if not package: + raise TypeError("relative imports require the 'package' argument") + level = 0 + for character in name: + if character != '.': + break + level += 1 + name = _resolve_name(name[level:], package, level) + __import__(name) + return sys.modules[name] + + +def load_class(uri, default="gunicorn.workers.sync.SyncWorker", + section="gunicorn.workers"): + if inspect.isclass(uri): + return uri + if uri.startswith("egg:"): + # uses entry points + entry_str = uri.split("egg:")[1] + try: + dist, name = entry_str.rsplit("#", 1) + except ValueError: + dist = entry_str + name = default + + try: + return pkg_resources.load_entry_point(dist, section, name) + except: + exc = traceback.format_exc() + msg = "class uri %r invalid or not found: \n\n[%s]" + raise RuntimeError(msg % (uri, exc)) + else: + components = uri.split('.') + if len(components) == 1: + while True: + if uri.startswith("#"): + uri = uri[1:] + + if uri in SUPPORTED_WORKERS: + components = SUPPORTED_WORKERS[uri].split(".") + break + + try: + return pkg_resources.load_entry_point("gunicorn", + section, uri) + except: + exc = traceback.format_exc() + msg = "class uri %r invalid or not found: \n\n[%s]" + raise RuntimeError(msg % (uri, exc)) + + klass = components.pop(-1) + + try: + mod = import_module('.'.join(components)) + except: + exc = traceback.format_exc() + msg = "class uri %r invalid or not found: \n\n[%s]" + raise RuntimeError(msg % (uri, exc)) + return getattr(mod, klass) + + +def get_username(uid): + """ get the username for a user id""" + return pwd.getpwuid(uid).pw_name + + +def set_owner_process(uid, gid, initgroups=False): + """ set user and group of workers processes """ + + if gid: + if uid: + try: + username = get_username(uid) + except KeyError: + initgroups = False + + # versions of python < 2.6.2 don't manage unsigned int for + # groups like on osx or fedora + gid = abs(gid) & 0x7FFFFFFF + + if initgroups: + os.initgroups(username, gid) + elif gid != os.getgid(): + os.setgid(gid) + + if uid: + os.setuid(uid) + + +def chown(path, uid, gid): + gid = abs(gid) & 0x7FFFFFFF # see note above. + os.chown(path, uid, gid) + + +if sys.platform.startswith("win"): + def _waitfor(func, pathname, waitall=False): + # Peform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on a i7@4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existence of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. 
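+            # Re-list the directory: the delete has completed once everything
+            # is gone (waitall) or once `name` no longer shows up.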
+            L = os.listdir(dirname)
+            if not (L if waitall else name in L):
+                return
+            # Increase the timeout and try again
+            time.sleep(timeout)
+            timeout *= 2
+        warnings.warn('tests may fail, delete still pending for ' + pathname,
+                      RuntimeWarning, stacklevel=4)
+
+    def _unlink(filename):
+        _waitfor(os.unlink, filename)
+else:
+    _unlink = os.unlink
+
+
+def unlink(filename):
+    try:
+        _unlink(filename)
+    except OSError as error:
+        # The filename need not exist.
+        if error.errno not in (errno.ENOENT, errno.ENOTDIR):
+            raise
+
+
+def is_ipv6(addr):
+    try:
+        socket.inet_pton(socket.AF_INET6, addr)
+    except socket.error:  # not a valid address
+        return False
+    except ValueError:  # ipv6 not supported on this platform
+        return False
+    return True
+
+
+def parse_address(netloc, default_port=8000):
+    if re.match(r'unix:(//)?', netloc):
+        return re.split(r'unix:(//)?', netloc)[-1]
+
+    if netloc.startswith("tcp://"):
+        netloc = netloc.split("tcp://")[1]
+
+    # get host
+    if '[' in netloc and ']' in netloc:
+        host = netloc.split(']')[0][1:].lower()
+    elif ':' in netloc:
+        host = netloc.split(':')[0].lower()
+    elif netloc == "":
+        host = "0.0.0.0"
+    else:
+        host = netloc.lower()
+
+    #get port
+    netloc = netloc.split(']')[-1]
+    if ":" in netloc:
+        port = netloc.split(':', 1)[1]
+        if not port.isdigit():
+            raise RuntimeError("%r is not a valid port number." % port)
+        port = int(port)
+    else:
+        port = default_port
+    return (host, port)
+
+
+def close_on_exec(fd):
+    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
+    flags |= fcntl.FD_CLOEXEC
+    fcntl.fcntl(fd, fcntl.F_SETFD, flags)
+
+
+def set_non_blocking(fd):
+    flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
+    fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+
+
+def close(sock):
+    try:
+        sock.close()
+    except socket.error:
+        pass
+
+try:
+    from os import closerange
+except ImportError:
+    def closerange(fd_low, fd_high):
+        # Iterate through and close all file descriptors.
+        for fd in range(fd_low, fd_high):
+            try:
+                os.close(fd)
+            except OSError:  # ERROR, fd wasn't open to begin with (ignored)
+                pass
+
+
+def write_chunk(sock, data):
+    if isinstance(data, text_type):
+        data = data.encode('utf-8')
+    chunk_size = "%X\r\n" % len(data)
+    chunk = b"".join([chunk_size.encode('utf-8'), data, b"\r\n"])
+    sock.sendall(chunk)
+
+
+def write(sock, data, chunked=False):
+    if chunked:
+        return write_chunk(sock, data)
+    sock.sendall(data)
+
+
+def write_nonblock(sock, data, chunked=False):
+    timeout = sock.gettimeout()
+    if timeout != 0.0:
+        try:
+            sock.setblocking(0)
+            return write(sock, data, chunked)
+        finally:
+            sock.setblocking(1)
+    else:
+        return write(sock, data, chunked)
+
+
+def write_error(sock, status_int, reason, mesg):
+    html = textwrap.dedent("""\
+    <html>
+      <head>
+        <title>%(reason)s</title>
+      </head>
+      <body>
+        <h1><p>%(reason)s</p></h1>
+ %(mesg)s + + + """) % {"reason": reason, "mesg": _compat.html_escape(mesg)} + + http = textwrap.dedent("""\ + HTTP/1.1 %s %s\r + Connection: close\r + Content-Type: text/html\r + Content-Length: %d\r + \r + %s""") % (str(status_int), reason, len(html), html) + write_nonblock(sock, http.encode('latin1')) + + +def import_app(module): + parts = module.split(":", 1) + if len(parts) == 1: + module, obj = module, "application" + else: + module, obj = parts[0], parts[1] + + try: + __import__(module) + except ImportError: + if module.endswith(".py") and os.path.exists(module): + msg = "Failed to find application, did you mean '%s:%s'?" + raise ImportError(msg % (module.rsplit(".", 1)[0], obj)) + else: + raise + + mod = sys.modules[module] + + is_debug = logging.root.level == logging.DEBUG + try: + app = eval(obj, vars(mod)) + except NameError: + if is_debug: + traceback.print_exception(*sys.exc_info()) + raise AppImportError("Failed to find application object %r in %r" % (obj, module)) + + if app is None: + raise AppImportError("Failed to find application object: %r" % obj) + + if not callable(app): + raise AppImportError("Application object must be callable.") + return app + + +def getcwd(): + # get current path, try to use PWD env first + try: + a = os.stat(os.environ['PWD']) + b = os.stat(os.getcwd()) + if a.st_ino == b.st_ino and a.st_dev == b.st_dev: + cwd = os.environ['PWD'] + else: + cwd = os.getcwd() + except: + cwd = os.getcwd() + return cwd + + +def http_date(timestamp=None): + """Return the current date and time formatted for a message header.""" + if timestamp is None: + timestamp = time.time() + s = email.utils.formatdate(timestamp, localtime=False, usegmt=True) + return s + + +def is_hoppish(header): + return header.lower().strip() in hop_headers + + +def daemonize(enable_stdio_inheritance=False): + """\ + Standard daemonization of a process. + http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16 + """ + if 'GUNICORN_FD' not in os.environ: + if os.fork(): + os._exit(0) + os.setsid() + + if os.fork(): + os._exit(0) + + os.umask(0o22) + + # In both the following any file descriptors above stdin + # stdout and stderr are left untouched. The inheritance + # option simply allows one to have output go to a file + # specified by way of shell redirection when not wanting + # to use --error-log option. + + if not enable_stdio_inheritance: + # Remap all of stdin, stdout and stderr on to + # /dev/null. The expectation is that users have + # specified the --error-log option. + + closerange(0, 3) + + fd_null = os.open(REDIRECT_TO, os.O_RDWR) + + if fd_null != 0: + os.dup2(fd_null, 0) + + os.dup2(fd_null, 1) + os.dup2(fd_null, 2) + + else: + fd_null = os.open(REDIRECT_TO, os.O_RDWR) + + # Always redirect stdin to /dev/null as we would + # never expect to need to read interactive input. + + if fd_null != 0: + os.close(0) + os.dup2(fd_null, 0) + + # If stdout and stderr are still connected to + # their original file descriptors we check to see + # if they are associated with terminal devices. + # When they are we map them to /dev/null so that + # are still detached from any controlling terminal + # properly. If not we preserve them as they are. + # + # If stdin and stdout were not hooked up to the + # original file descriptors, then all bets are + # off and all we can really do is leave them as + # they were. + # + # This will allow 'gunicorn ... > output.log 2>&1' + # to work with stdout/stderr going to the file + # as expected. 
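+            # For example, with inheritance enabled both of these still log
+            # somewhere sensible:
+            #   gunicorn -D --enable-stdio-inheritance app:app > output.log 2>&1
+            #   gunicorn -D --error-logfile error.log app:app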
+ # + # Note that if using --error-log option, the log + # file specified through shell redirection will + # only be used up until the log file specified + # by the option takes over. As it replaces stdout + # and stderr at the file descriptor level, then + # anything using stdout or stderr, including having + # cached a reference to them, will still work. + + def redirect(stream, fd_expect): + try: + fd = stream.fileno() + if fd == fd_expect and stream.isatty(): + os.close(fd) + os.dup2(fd_null, fd) + except AttributeError: + pass + + redirect(sys.stdout, 1) + redirect(sys.stderr, 2) + + +def seed(): + try: + random.seed(os.urandom(64)) + except NotImplementedError: + random.seed('%s.%s' % (time.time(), os.getpid())) + + +def check_is_writeable(path): + try: + f = open(path, 'a') + except IOError as e: + raise RuntimeError("Error: '%s' isn't writable [%r]" % (path, e)) + f.close() + + +def to_bytestring(value, encoding="utf8"): + """Converts a string argument to a byte string""" + if isinstance(value, bytes): + return value + if not isinstance(value, text_type): + raise TypeError('%r is not a string' % value) + + return value.encode(encoding) + +def has_fileno(obj): + if not hasattr(obj, "fileno"): + return False + + # check BytesIO case and maybe others + try: + obj.fileno() + except (AttributeError, IOError, io.UnsupportedOperation): + return False + + return True + + +def warn(msg): + print("!!!", file=sys.stderr) + + lines = msg.splitlines() + for i, line in enumerate(lines): + if i == 0: + line = "WARNING: %s" % line + print("!!! %s" % line, file=sys.stderr) + + print("!!!\n", file=sys.stderr) + sys.stderr.flush() + + +def make_fail_app(msg): + msg = to_bytestring(msg) + + def app(environ, start_response): + start_response("500 Internal Server Error", [ + ("Content-Type", "text/plain"), + ("Content-Length", str(len(msg))) + ]) + return [msg] + + return app + + +def split_request_uri(uri): + if uri.startswith("//"): + # When the path starts with //, urlsplit considers it as a + # relative uri while the RFC says we should consider it as abs_path + # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2 + # We use temporary dot prefix to workaround this behaviour + parts = _compat.urlsplit("." + uri) + return parts._replace(path=parts.path[1:]) + + return _compat.urlsplit(uri) diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/__init__.py b/venv/lib/python3.6/site-packages/gunicorn/workers/__init__.py new file mode 100644 index 0000000..074e001 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/workers/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +import sys + +# supported gunicorn workers. +SUPPORTED_WORKERS = { + "sync": "gunicorn.workers.sync.SyncWorker", + "eventlet": "gunicorn.workers.geventlet.EventletWorker", + "gevent": "gunicorn.workers.ggevent.GeventWorker", + "gevent_wsgi": "gunicorn.workers.ggevent.GeventPyWSGIWorker", + "gevent_pywsgi": "gunicorn.workers.ggevent.GeventPyWSGIWorker", + "tornado": "gunicorn.workers.gtornado.TornadoWorker", + "gthread": "gunicorn.workers.gthread.ThreadWorker", +} + + +if sys.version_info >= (3, 4): + # gaiohttp worker can be used with Python 3.4+ only. 
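+    # (Resolution of -k/--worker-class goes through this table, e.g.
+    # "-k gaiohttp" maps to the dotted path registered below; a name
+    # not present here is imported as a full dotted path instead.)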
+ SUPPORTED_WORKERS["gaiohttp"] = "gunicorn.workers.gaiohttp.AiohttpWorker" diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/__init__.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..e5f76b4 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/_gaiohttp.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/_gaiohttp.cpython-36.pyc new file mode 100644 index 0000000..c0808fb Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/_gaiohttp.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/base.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/base.cpython-36.pyc new file mode 100644 index 0000000..a3561f5 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/base.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/base_async.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/base_async.cpython-36.pyc new file mode 100644 index 0000000..1baa053 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/base_async.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/gaiohttp.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/gaiohttp.cpython-36.pyc new file mode 100644 index 0000000..b49c16a Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/gaiohttp.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/geventlet.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/geventlet.cpython-36.pyc new file mode 100644 index 0000000..8d60a23 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/geventlet.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/ggevent.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/ggevent.cpython-36.pyc new file mode 100644 index 0000000..370749d Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/ggevent.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/gthread.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/gthread.cpython-36.pyc new file mode 100644 index 0000000..a67643b Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/gthread.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/gtornado.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/gtornado.cpython-36.pyc new file mode 100644 index 0000000..265bd56 Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/gtornado.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/sync.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/sync.cpython-36.pyc new file mode 100644 index 0000000..cdc9cd8 Binary files /dev/null and 
b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/sync.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/workertmp.cpython-36.pyc b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/workertmp.cpython-36.pyc new file mode 100644 index 0000000..c84ffec Binary files /dev/null and b/venv/lib/python3.6/site-packages/gunicorn/workers/__pycache__/workertmp.cpython-36.pyc differ diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/_gaiohttp.py b/venv/lib/python3.6/site-packages/gunicorn/workers/_gaiohttp.py new file mode 100644 index 0000000..fe378c3 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/workers/_gaiohttp.py @@ -0,0 +1,168 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +import asyncio +import datetime +import functools +import logging +import os + +try: + import ssl +except ImportError: + ssl = None + +import gunicorn.workers.base as base + +from aiohttp.wsgi import WSGIServerHttpProtocol as OldWSGIServerHttpProtocol + + +class WSGIServerHttpProtocol(OldWSGIServerHttpProtocol): + def log_access(self, request, environ, response, time): + self.logger.access(response, request, environ, datetime.timedelta(0, 0, time)) + + +class AiohttpWorker(base.Worker): + + def __init__(self, *args, **kw): # pragma: no cover + super().__init__(*args, **kw) + cfg = self.cfg + if cfg.is_ssl: + self.ssl_context = self._create_ssl_context(cfg) + else: + self.ssl_context = None + self.servers = [] + self.connections = {} + + def init_process(self): + # create new event_loop after fork + asyncio.get_event_loop().close() + + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) + + super().init_process() + + def run(self): + self._runner = asyncio.ensure_future(self._run(), loop=self.loop) + + try: + self.loop.run_until_complete(self._runner) + finally: + self.loop.close() + + def wrap_protocol(self, proto): + proto.connection_made = _wrp( + proto, proto.connection_made, self.connections) + proto.connection_lost = _wrp( + proto, proto.connection_lost, self.connections, False) + return proto + + def factory(self, wsgi, addr): + # are we in debug level + is_debug = self.log.loglevel == logging.DEBUG + + proto = WSGIServerHttpProtocol( + wsgi, readpayload=True, + loop=self.loop, + log=self.log, + debug=is_debug, + keep_alive=self.cfg.keepalive, + access_log=self.log.access_log, + access_log_format=self.cfg.access_log_format) + return self.wrap_protocol(proto) + + def get_factory(self, sock, addr): + return functools.partial(self.factory, self.wsgi, addr) + + @asyncio.coroutine + def close(self): + try: + if hasattr(self.wsgi, 'close'): + yield from self.wsgi.close() + except: + self.log.exception('Process shutdown exception') + + @asyncio.coroutine + def _run(self): + for sock in self.sockets: + factory = self.get_factory(sock.sock, sock.cfg_addr) + self.servers.append( + (yield from self._create_server(factory, sock))) + + # If our parent changed then we shut down. 
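+        # (The ppid recorded at boot is compared with os.getppid() on
+        # each tick of the loop below; a mismatch means the master died
+        # or was replaced, so the worker stops accepting and drains its
+        # remaining connections.)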
+ pid = os.getpid() + try: + while self.alive or self.connections: + self.notify() + + if (self.alive and + pid == os.getpid() and self.ppid != os.getppid()): + self.log.info("Parent changed, shutting down: %s", self) + self.alive = False + + # stop accepting requests + if not self.alive: + if self.servers: + self.log.info( + "Stopping server: %s, connections: %s", + pid, len(self.connections)) + for server in self.servers: + server.close() + self.servers.clear() + + # prepare connections for closing + for conn in self.connections.values(): + if hasattr(conn, 'closing'): + conn.closing() + + yield from asyncio.sleep(1.0, loop=self.loop) + except KeyboardInterrupt: + pass + + if self.servers: + for server in self.servers: + server.close() + + yield from self.close() + + @asyncio.coroutine + def _create_server(self, factory, sock): + return self.loop.create_server(factory, sock=sock.sock, + ssl=self.ssl_context) + + @staticmethod + def _create_ssl_context(cfg): + """ Creates SSLContext instance for usage in asyncio.create_server. + + See ssl.SSLSocket.__init__ for more details. + """ + ctx = ssl.SSLContext(cfg.ssl_version) + ctx.load_cert_chain(cfg.certfile, cfg.keyfile) + ctx.verify_mode = cfg.cert_reqs + if cfg.ca_certs: + ctx.load_verify_locations(cfg.ca_certs) + if cfg.ciphers: + ctx.set_ciphers(cfg.ciphers) + return ctx + + +class _wrp: + + def __init__(self, proto, meth, tracking, add=True): + self._proto = proto + self._id = id(proto) + self._meth = meth + self._tracking = tracking + self._add = add + + def __call__(self, *args): + if self._add: + self._tracking[self._id] = self._proto + elif self._id in self._tracking: + del self._tracking[self._id] + + conn = self._meth(*args) + return conn diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/base.py b/venv/lib/python3.6/site-packages/gunicorn/workers/base.py new file mode 100644 index 0000000..881efa0 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/workers/base.py @@ -0,0 +1,264 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +from datetime import datetime +import os +from random import randint +import signal +from ssl import SSLError +import sys +import time +import traceback + +from gunicorn import six +from gunicorn import util +from gunicorn.workers.workertmp import WorkerTmp +from gunicorn.reloader import reloader_engines +from gunicorn.http.errors import ( + InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod, + InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders, +) +from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest +from gunicorn.http.errors import InvalidSchemeHeaders +from gunicorn.http.wsgi import default_environ, Response +from gunicorn.six import MAXSIZE + + +class Worker(object): + + SIGNALS = [getattr(signal, "SIG%s" % x) + for x in "ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split()] + + PIPE = [] + + def __init__(self, age, ppid, sockets, app, timeout, cfg, log): + """\ + This is called pre-fork so it shouldn't do anything to the + current process. If there's a need to make process wide + changes you'll want to do that in ``self.init_process()``. 
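+
+        (Instantiated by the arbiter in the master process before each
+        fork; per-process setup belongs in ``init_process()``, which
+        runs in the child.)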
+ """ + self.age = age + self.pid = "[booting]" + self.ppid = ppid + self.sockets = sockets + self.app = app + self.timeout = timeout + self.cfg = cfg + self.booted = False + self.aborted = False + self.reloader = None + + self.nr = 0 + jitter = randint(0, cfg.max_requests_jitter) + self.max_requests = cfg.max_requests + jitter or MAXSIZE + self.alive = True + self.log = log + self.tmp = WorkerTmp(cfg) + + def __str__(self): + return "" % self.pid + + def notify(self): + """\ + Your worker subclass must arrange to have this method called + once every ``self.timeout`` seconds. If you fail in accomplishing + this task, the master process will murder your workers. + """ + self.tmp.notify() + + def run(self): + """\ + This is the mainloop of a worker process. You should override + this method in a subclass to provide the intended behaviour + for your particular evil schemes. + """ + raise NotImplementedError() + + def init_process(self): + """\ + If you override this method in a subclass, the last statement + in the function should be to call this method with + super(MyWorkerClass, self).init_process() so that the ``run()`` + loop is initiated. + """ + + # set environment' variables + if self.cfg.env: + for k, v in self.cfg.env.items(): + os.environ[k] = v + + util.set_owner_process(self.cfg.uid, self.cfg.gid, + initgroups=self.cfg.initgroups) + + # Reseed the random number generator + util.seed() + + # For waking ourselves up + self.PIPE = os.pipe() + for p in self.PIPE: + util.set_non_blocking(p) + util.close_on_exec(p) + + # Prevent fd inheritance + for s in self.sockets: + util.close_on_exec(s) + util.close_on_exec(self.tmp.fileno()) + + self.wait_fds = self.sockets + [self.PIPE[0]] + + self.log.close_on_exec() + + self.init_signals() + + # start the reloader + if self.cfg.reload: + def changed(fname): + self.log.info("Worker reloading: %s modified", fname) + self.alive = False + self.cfg.worker_int(self) + time.sleep(0.1) + sys.exit(0) + + reloader_cls = reloader_engines[self.cfg.reload_engine] + self.reloader = reloader_cls(extra_files=self.cfg.reload_extra_files, + callback=changed) + self.reloader.start() + + self.load_wsgi() + self.cfg.post_worker_init(self) + + # Enter main run loop + self.booted = True + self.run() + + def load_wsgi(self): + try: + self.wsgi = self.app.wsgi() + except SyntaxError as e: + if not self.cfg.reload: + raise + + self.log.exception(e) + + # fix from PR #1228 + # storing the traceback into exc_tb will create a circular reference. + # per https://docs.python.org/2/library/sys.html#sys.exc_info warning, + # delete the traceback after use. 
+ try: + _, exc_val, exc_tb = sys.exc_info() + self.reloader.add_extra_file(exc_val.filename) + + tb_string = six.StringIO() + traceback.print_tb(exc_tb, file=tb_string) + self.wsgi = util.make_fail_app(tb_string.getvalue()) + finally: + del exc_tb + + def init_signals(self): + # reset signaling + for s in self.SIGNALS: + signal.signal(s, signal.SIG_DFL) + # init new signaling + signal.signal(signal.SIGQUIT, self.handle_quit) + signal.signal(signal.SIGTERM, self.handle_exit) + signal.signal(signal.SIGINT, self.handle_quit) + signal.signal(signal.SIGWINCH, self.handle_winch) + signal.signal(signal.SIGUSR1, self.handle_usr1) + signal.signal(signal.SIGABRT, self.handle_abort) + + # Don't let SIGTERM and SIGUSR1 disturb active requests + # by interrupting system calls + if hasattr(signal, 'siginterrupt'): # python >= 2.6 + signal.siginterrupt(signal.SIGTERM, False) + signal.siginterrupt(signal.SIGUSR1, False) + + if hasattr(signal, 'set_wakeup_fd'): + signal.set_wakeup_fd(self.PIPE[1]) + + def handle_usr1(self, sig, frame): + self.log.reopen_files() + + def handle_exit(self, sig, frame): + self.alive = False + + def handle_quit(self, sig, frame): + self.alive = False + # worker_int callback + self.cfg.worker_int(self) + time.sleep(0.1) + sys.exit(0) + + def handle_abort(self, sig, frame): + self.alive = False + self.cfg.worker_abort(self) + sys.exit(1) + + def handle_error(self, req, client, addr, exc): + request_start = datetime.now() + addr = addr or ('', -1) # unix socket case + if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod, + InvalidHTTPVersion, InvalidHeader, InvalidHeaderName, + LimitRequestLine, LimitRequestHeaders, + InvalidProxyLine, ForbiddenProxyRequest, + InvalidSchemeHeaders, + SSLError)): + + status_int = 400 + reason = "Bad Request" + + if isinstance(exc, InvalidRequestLine): + mesg = "Invalid Request Line '%s'" % str(exc) + elif isinstance(exc, InvalidRequestMethod): + mesg = "Invalid Method '%s'" % str(exc) + elif isinstance(exc, InvalidHTTPVersion): + mesg = "Invalid HTTP Version '%s'" % str(exc) + elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)): + mesg = "%s" % str(exc) + if not req and hasattr(exc, "req"): + req = exc.req # for access log + elif isinstance(exc, LimitRequestLine): + mesg = "%s" % str(exc) + elif isinstance(exc, LimitRequestHeaders): + mesg = "Error parsing headers: '%s'" % str(exc) + elif isinstance(exc, InvalidProxyLine): + mesg = "'%s'" % str(exc) + elif isinstance(exc, ForbiddenProxyRequest): + reason = "Forbidden" + mesg = "Request forbidden" + status_int = 403 + elif isinstance(exc, InvalidSchemeHeaders): + mesg = "%s" % str(exc) + elif isinstance(exc, SSLError): + reason = "Forbidden" + mesg = "'%s'" % str(exc) + status_int = 403 + + msg = "Invalid request from ip={ip}: {error}" + self.log.debug(msg.format(ip=addr[0], error=str(exc))) + else: + if hasattr(req, "uri"): + self.log.exception("Error handling request %s", req.uri) + status_int = 500 + reason = "Internal Server Error" + mesg = "" + + if req is not None: + request_time = datetime.now() - request_start + environ = default_environ(req, client, self.cfg) + environ['REMOTE_ADDR'] = addr[0] + environ['REMOTE_PORT'] = str(addr[1]) + resp = Response(req, client, self.cfg) + resp.status = "%s %s" % (status_int, reason) + resp.response_length = len(mesg) + self.log.access(resp, req, environ, request_time) + + try: + util.write_error(client, status_int, reason, mesg) + except: + self.log.debug("Failed to send error message.") + + def handle_winch(self, sig, fname): + # Ignore 
SIGWINCH in worker. Fixes a crash on OpenBSD. + self.log.debug("worker: SIGWINCH ignored.") diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/base_async.py b/venv/lib/python3.6/site-packages/gunicorn/workers/base_async.py new file mode 100644 index 0000000..a3a0f91 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/workers/base_async.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +from datetime import datetime +import errno +import socket +import ssl +import sys + +import gunicorn.http as http +import gunicorn.http.wsgi as wsgi +import gunicorn.util as util +import gunicorn.workers.base as base +from gunicorn import six + +ALREADY_HANDLED = object() + + +class AsyncWorker(base.Worker): + + def __init__(self, *args, **kwargs): + super(AsyncWorker, self).__init__(*args, **kwargs) + self.worker_connections = self.cfg.worker_connections + + def timeout_ctx(self): + raise NotImplementedError() + + def is_already_handled(self, respiter): + # some workers will need to overload this function to raise a StopIteration + return respiter == ALREADY_HANDLED + + def handle(self, listener, client, addr): + req = None + try: + parser = http.RequestParser(self.cfg, client) + try: + listener_name = listener.getsockname() + if not self.cfg.keepalive: + req = six.next(parser) + self.handle_request(listener_name, req, client, addr) + else: + # keepalive loop + proxy_protocol_info = {} + while True: + req = None + with self.timeout_ctx(): + req = six.next(parser) + if not req: + break + if req.proxy_protocol_info: + proxy_protocol_info = req.proxy_protocol_info + else: + req.proxy_protocol_info = proxy_protocol_info + self.handle_request(listener_name, req, client, addr) + except http.errors.NoMoreData as e: + self.log.debug("Ignored premature client disconnection. %s", e) + except StopIteration as e: + self.log.debug("Closing connection. 
%s", e) + except ssl.SSLError: + # pass to next try-except level + six.reraise(*sys.exc_info()) + except EnvironmentError: + # pass to next try-except level + six.reraise(*sys.exc_info()) + except Exception as e: + self.handle_error(req, client, addr, e) + except ssl.SSLError as e: + if e.args[0] == ssl.SSL_ERROR_EOF: + self.log.debug("ssl connection closed") + client.close() + else: + self.log.debug("Error processing SSL request.") + self.handle_error(req, client, addr, e) + except EnvironmentError as e: + if e.errno not in (errno.EPIPE, errno.ECONNRESET): + self.log.exception("Socket error processing request.") + else: + if e.errno == errno.ECONNRESET: + self.log.debug("Ignoring connection reset") + else: + self.log.debug("Ignoring EPIPE") + except Exception as e: + self.handle_error(req, client, addr, e) + finally: + util.close(client) + + def handle_request(self, listener_name, req, sock, addr): + request_start = datetime.now() + environ = {} + resp = None + try: + self.cfg.pre_request(self, req) + resp, environ = wsgi.create(req, sock, addr, + listener_name, self.cfg) + environ["wsgi.multithread"] = True + self.nr += 1 + if self.alive and self.nr >= self.max_requests: + self.log.info("Autorestarting worker after current request.") + resp.force_close() + self.alive = False + + if not self.cfg.keepalive: + resp.force_close() + + respiter = self.wsgi(environ, resp.start_response) + if self.is_already_handled(respiter): + return False + try: + if isinstance(respiter, environ['wsgi.file_wrapper']): + resp.write_file(respiter) + else: + for item in respiter: + resp.write(item) + resp.close() + request_time = datetime.now() - request_start + self.log.access(resp, req, environ, request_time) + finally: + if hasattr(respiter, "close"): + respiter.close() + if resp.should_close(): + raise StopIteration() + except StopIteration: + raise + except EnvironmentError: + # If the original exception was a socket.error we delegate + # handling it to the caller (where handle() might ignore it) + six.reraise(*sys.exc_info()) + except Exception: + if resp and resp.headers_sent: + # If the requests have already been sent, we should close the + # connection to indicate the error. + self.log.exception("Error handling request") + try: + sock.shutdown(socket.SHUT_RDWR) + sock.close() + except EnvironmentError: + pass + raise StopIteration() + raise + finally: + try: + self.cfg.post_request(self, req, environ, resp) + except Exception: + self.log.exception("Exception in post_request hook") + return True diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/gaiohttp.py b/venv/lib/python3.6/site-packages/gunicorn/workers/gaiohttp.py new file mode 100644 index 0000000..bef6b49 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/workers/gaiohttp.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +import sys + +from gunicorn import util + +if sys.version_info >= (3, 4): + try: + import aiohttp # pylint: disable=unused-import + except ImportError: + raise RuntimeError("You need aiohttp installed to use this worker.") + else: + try: + from aiohttp.worker import GunicornWebWorker as AiohttpWorker + except ImportError: + from gunicorn.workers._gaiohttp import AiohttpWorker + + util.warn( + "The 'gaiohttp' worker is deprecated. See --worker-class " + "documentation for more information." 
+ ) + __all__ = ['AiohttpWorker'] +else: + raise RuntimeError("You need Python >= 3.4 to use the gaiohttp worker") diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/geventlet.py b/venv/lib/python3.6/site-packages/gunicorn/workers/geventlet.py new file mode 100644 index 0000000..189062c --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/workers/geventlet.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. + +from functools import partial +import errno +import sys + +try: + import eventlet +except ImportError: + raise RuntimeError("You need eventlet installed to use this worker.") + +# validate the eventlet version +if eventlet.version_info < (0, 9, 7): + raise RuntimeError("You need eventlet >= 0.9.7") + + +from eventlet import hubs, greenthread +from eventlet.greenio import GreenSocket +from eventlet.hubs import trampoline +from eventlet.wsgi import ALREADY_HANDLED as EVENTLET_ALREADY_HANDLED +import greenlet + +from gunicorn.http.wsgi import sendfile as o_sendfile +from gunicorn.workers.base_async import AsyncWorker + +def _eventlet_sendfile(fdout, fdin, offset, nbytes): + while True: + try: + return o_sendfile(fdout, fdin, offset, nbytes) + except OSError as e: + if e.args[0] == errno.EAGAIN: + trampoline(fdout, write=True) + else: + raise + + +def _eventlet_serve(sock, handle, concurrency): + """ + Serve requests forever. + + This code is nearly identical to ``eventlet.convenience.serve`` except + that it attempts to join the pool at the end, which allows for gunicorn + graceful shutdowns. + """ + pool = eventlet.greenpool.GreenPool(concurrency) + server_gt = eventlet.greenthread.getcurrent() + + while True: + try: + conn, addr = sock.accept() + gt = pool.spawn(handle, conn, addr) + gt.link(_eventlet_stop, server_gt, conn) + conn, addr, gt = None, None, None + except eventlet.StopServe: + sock.close() + pool.waitall() + return + + +def _eventlet_stop(client, server, conn): + """ + Stop a greenlet handling a request and close its connection. + + This code is lifted from eventlet so as not to depend on undocumented + functions in the library. 
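+
+    Unexpected exceptions from the handler are re-raised in the server
+    greenthread via greenthread.kill() so the accept loop sees them.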
+ """ + try: + try: + client.wait() + finally: + conn.close() + except greenlet.GreenletExit: + pass + except Exception: + greenthread.kill(server, *sys.exc_info()) + + +def patch_sendfile(): + from gunicorn.http import wsgi + + if o_sendfile is not None: + setattr(wsgi, "sendfile", _eventlet_sendfile) + + +class EventletWorker(AsyncWorker): + + def patch(self): + hubs.use_hub() + eventlet.monkey_patch(os=False) + patch_sendfile() + + def is_already_handled(self, respiter): + if respiter == EVENTLET_ALREADY_HANDLED: + raise StopIteration() + else: + return super(EventletWorker, self).is_already_handled(respiter) + + def init_process(self): + super(EventletWorker, self).init_process() + self.patch() + + def handle_quit(self, sig, frame): + eventlet.spawn(super(EventletWorker, self).handle_quit, sig, frame) + + def handle_usr1(self, sig, frame): + eventlet.spawn(super(EventletWorker, self).handle_usr1, sig, frame) + + def timeout_ctx(self): + return eventlet.Timeout(self.cfg.keepalive or None, False) + + def handle(self, listener, client, addr): + if self.cfg.is_ssl: + client = eventlet.wrap_ssl(client, server_side=True, + **self.cfg.ssl_options) + + super(EventletWorker, self).handle(listener, client, addr) + + def run(self): + acceptors = [] + for sock in self.sockets: + gsock = GreenSocket(sock) + gsock.setblocking(1) + hfun = partial(self.handle, gsock) + acceptor = eventlet.spawn(_eventlet_serve, gsock, hfun, + self.worker_connections) + + acceptors.append(acceptor) + eventlet.sleep(0.0) + + while self.alive: + self.notify() + eventlet.sleep(1.0) + + self.notify() + try: + with eventlet.Timeout(self.cfg.graceful_timeout) as t: + for a in acceptors: + a.kill(eventlet.StopServe()) + for a in acceptors: + a.wait() + except eventlet.Timeout as te: + if te != t: + raise + for a in acceptors: + a.kill() diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/ggevent.py b/venv/lib/python3.6/site-packages/gunicorn/workers/ggevent.py new file mode 100644 index 0000000..fb9d919 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/workers/ggevent.py @@ -0,0 +1,246 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. 
+
+import errno
+import os
+import sys
+from datetime import datetime
+from functools import partial
+import time
+
+_socket = __import__("socket")
+
+# workaround on osx, disable kqueue
+if sys.platform == "darwin":
+    os.environ['EVENT_NOKQUEUE'] = "1"
+
+try:
+    import gevent
+except ImportError:
+    raise RuntimeError("You need gevent installed to use this worker.")
+from gevent.pool import Pool
+from gevent.server import StreamServer
+from gevent.socket import wait_write, socket
+from gevent import pywsgi
+
+import gunicorn
+from gunicorn.http.wsgi import base_environ
+from gunicorn.workers.base_async import AsyncWorker
+from gunicorn.http.wsgi import sendfile as o_sendfile
+
+VERSION = "gevent/%s gunicorn/%s" % (gevent.__version__, gunicorn.__version__)
+
+def _gevent_sendfile(fdout, fdin, offset, nbytes):
+    while True:
+        try:
+            return o_sendfile(fdout, fdin, offset, nbytes)
+        except OSError as e:
+            if e.args[0] == errno.EAGAIN:
+                wait_write(fdout)
+            else:
+                raise
+
+def patch_sendfile():
+    from gunicorn.http import wsgi
+
+    if o_sendfile is not None:
+        setattr(wsgi, "sendfile", _gevent_sendfile)
+
+
+class GeventWorker(AsyncWorker):
+
+    server_class = None
+    wsgi_handler = None
+
+    def patch(self):
+        from gevent import monkey
+        monkey.noisy = False
+
+        # if the new version is used make sure to patch subprocess
+        if gevent.version_info[0] == 0:
+            monkey.patch_all()
+        else:
+            monkey.patch_all(subprocess=True)
+
+        # monkey patch sendfile to make it non-blocking
+        patch_sendfile()
+
+        # patch sockets
+        sockets = []
+        for s in self.sockets:
+            if sys.version_info[0] == 3:
+                sockets.append(socket(s.FAMILY, _socket.SOCK_STREAM,
+                                      fileno=s.sock.fileno()))
+            else:
+                sockets.append(socket(s.FAMILY, _socket.SOCK_STREAM,
+                                      _sock=s))
+        self.sockets = sockets
+
+    def notify(self):
+        super(GeventWorker, self).notify()
+        if self.ppid != os.getppid():
+            self.log.info("Parent changed, shutting down: %s", self)
+            sys.exit(0)
+
+    def timeout_ctx(self):
+        return gevent.Timeout(self.cfg.keepalive, False)
+
+    def run(self):
+        servers = []
+        ssl_args = {}
+
+        if self.cfg.is_ssl:
+            ssl_args = dict(server_side=True, **self.cfg.ssl_options)
+
+        for s in self.sockets:
+            s.setblocking(1)
+            pool = Pool(self.worker_connections)
+            if self.server_class is not None:
+                environ = base_environ(self.cfg)
+                environ.update({
+                    "wsgi.multithread": True,
+                    "SERVER_SOFTWARE": VERSION,
+                })
+                server = self.server_class(
+                    s, application=self.wsgi, spawn=pool, log=self.log,
+                    handler_class=self.wsgi_handler, environ=environ,
+                    **ssl_args)
+            else:
+                hfun = partial(self.handle, s)
+                server = StreamServer(s, handle=hfun, spawn=pool, **ssl_args)
+
+            server.start()
+            servers.append(server)
+
+        while self.alive:
+            self.notify()
+            gevent.sleep(1.0)
+
+        try:
+            # Stop accepting requests
+            for server in servers:
+                if hasattr(server, 'close'):  # gevent 1.0
+                    server.close()
+                if hasattr(server, 'kill'):  # gevent < 1.0
+                    server.kill()
+
+            # Handle current requests until graceful_timeout
+            ts = time.time()
+            while time.time() - ts <= self.cfg.graceful_timeout:
+                accepting = 0
+                for server in servers:
+                    if server.pool.free_count() != server.pool.size:
+                        accepting += 1
+
+                # if no server is accepting a connection, we can exit
+                if not accepting:
+                    return
+
+                self.notify()
+                gevent.sleep(1.0)
+
+            # Force kill all the active handlers
+            self.log.warning("Worker graceful timeout (pid:%s)" % self.pid)
+            for server in servers:
+                server.stop(timeout=1)
+        except:
+            pass
+
+    def handle(self, listener, client, addr):
+        # Connected socket timeout defaults to socket.getdefaulttimeout().
+        # This forces blocking mode.
+        client.setblocking(1)
+        super(GeventWorker, self).handle(listener, client, addr)
+
+    def handle_request(self, listener_name, req, sock, addr):
+        try:
+            super(GeventWorker, self).handle_request(listener_name, req, sock,
+                                                     addr)
+        except gevent.GreenletExit:
+            pass
+        except SystemExit:
+            pass
+
+    def handle_quit(self, sig, frame):
+        # Move this out of the signal handler so we can use
+        # blocking calls. See #1126
+        gevent.spawn(super(GeventWorker, self).handle_quit, sig, frame)
+
+    def handle_usr1(self, sig, frame):
+        # Make the gevent workers handle the usr1 signal
+        # by deferring to a new greenlet. See #1645
+        gevent.spawn(super(GeventWorker, self).handle_usr1, sig, frame)
+
+    if gevent.version_info[0] == 0:
+
+        def init_process(self):
+            # monkey patch here
+            self.patch()
+
+            # reinit the hub
+            import gevent.core
+            gevent.core.reinit()
+
+            # gevent 0.13 and older doesn't reinitialize dns for us after forking
+            # here's the workaround
+            gevent.core.dns_shutdown(fail_requests=1)
+            gevent.core.dns_init()
+            super(GeventWorker, self).init_process()
+
+    else:
+
+        def init_process(self):
+            # monkey patch here
+            self.patch()
+
+            # reinit the hub
+            from gevent import hub
+            hub.reinit()
+
+            # then initialize the process
+            super(GeventWorker, self).init_process()
+
+
+class GeventResponse(object):
+
+    status = None
+    headers = None
+    sent = None
+
+    def __init__(self, status, headers, clength):
+        self.status = status
+        self.headers = headers
+        self.sent = clength
+
+
+class PyWSGIHandler(pywsgi.WSGIHandler):
+
+    def log_request(self):
+        start = datetime.fromtimestamp(self.time_start)
+        finish = datetime.fromtimestamp(self.time_finish)
+        response_time = finish - start
+        resp_headers = getattr(self, 'response_headers', {})
+        resp = GeventResponse(self.status, resp_headers, self.response_length)
+        if hasattr(self, 'headers'):
+            req_headers = self.headers.items()
+        else:
+            req_headers = []
+        self.server.log.access(resp, req_headers, self.environ, response_time)
+
+    def get_environ(self):
+        env = super(PyWSGIHandler, self).get_environ()
+        env['gunicorn.sock'] = self.socket
+        env['RAW_URI'] = self.path
+        return env
+
+
+class PyWSGIServer(pywsgi.WSGIServer):
+    pass
+
+
+class GeventPyWSGIWorker(GeventWorker):
+    "The Gevent StreamServer based workers."
+    server_class = PyWSGIServer
+    wsgi_handler = PyWSGIHandler
diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/gthread.py b/venv/lib/python3.6/site-packages/gunicorn/workers/gthread.py
new file mode 100644
index 0000000..862f873
--- /dev/null
+++ b/venv/lib/python3.6/site-packages/gunicorn/workers/gthread.py
@@ -0,0 +1,367 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+# design:
+# a threaded worker accepts connections in the main loop, accepted
+# connections are added to the thread pool as a connection job. On
+# keepalive, connections are put back in the loop waiting for an event.
+# If no event happens after the keepalive timeout, the connection is
+# closed.
+
+from collections import deque
+from datetime import datetime
+import errno
+from functools import partial
+import os
+import socket
+import ssl
+import sys
+from threading import RLock
+import time
+
+from .. import http
+from ..http import wsgi
+from .. import util
+from . import base
+from ..
import six + + +try: + import concurrent.futures as futures +except ImportError: + raise RuntimeError(""" + You need to install the 'futures' package to use this worker with this + Python version. + """) + +try: + from asyncio import selectors +except ImportError: + from gunicorn import selectors + + +class TConn(object): + + def __init__(self, cfg, sock, client, server): + self.cfg = cfg + self.sock = sock + self.client = client + self.server = server + + self.timeout = None + self.parser = None + + # set the socket to non blocking + self.sock.setblocking(False) + + def init(self): + self.sock.setblocking(True) + if self.parser is None: + # wrap the socket if needed + if self.cfg.is_ssl: + self.sock = ssl.wrap_socket(self.sock, server_side=True, + **self.cfg.ssl_options) + + # initialize the parser + self.parser = http.RequestParser(self.cfg, self.sock) + + def set_timeout(self): + # set the timeout + self.timeout = time.time() + self.cfg.keepalive + + def close(self): + util.close(self.sock) + + +class ThreadWorker(base.Worker): + + def __init__(self, *args, **kwargs): + super(ThreadWorker, self).__init__(*args, **kwargs) + self.worker_connections = self.cfg.worker_connections + self.max_keepalived = self.cfg.worker_connections - self.cfg.threads + # initialise the pool + self.tpool = None + self.poller = None + self._lock = None + self.futures = deque() + self._keep = deque() + self.nr_conns = 0 + + @classmethod + def check_config(cls, cfg, log): + max_keepalived = cfg.worker_connections - cfg.threads + + if max_keepalived <= 0 and cfg.keepalive: + log.warning("No keepalived connections can be handled. " + + "Check the number of worker connections and threads.") + + def init_process(self): + self.tpool = futures.ThreadPoolExecutor(max_workers=self.cfg.threads) + self.poller = selectors.DefaultSelector() + self._lock = RLock() + super(ThreadWorker, self).init_process() + + def handle_quit(self, sig, frame): + self.alive = False + # worker_int callback + self.cfg.worker_int(self) + self.tpool.shutdown(False) + time.sleep(0.1) + sys.exit(0) + + def _wrap_future(self, fs, conn): + fs.conn = conn + self.futures.append(fs) + fs.add_done_callback(self.finish_request) + + def enqueue_req(self, conn): + conn.init() + # submit the connection to a worker + fs = self.tpool.submit(self.handle, conn) + self._wrap_future(fs, conn) + + def accept(self, server, listener): + try: + sock, client = listener.accept() + # initialize the connection object + conn = TConn(self.cfg, sock, client, server) + self.nr_conns += 1 + # enqueue the job + self.enqueue_req(conn) + except EnvironmentError as e: + if e.errno not in (errno.EAGAIN, + errno.ECONNABORTED, errno.EWOULDBLOCK): + raise + + def reuse_connection(self, conn, client): + with self._lock: + # unregister the client from the poller + self.poller.unregister(client) + # remove the connection from keepalive + try: + self._keep.remove(conn) + except ValueError: + # race condition + return + + # submit the connection to a worker + self.enqueue_req(conn) + + def murder_keepalived(self): + now = time.time() + while True: + with self._lock: + try: + # remove the connection from the queue + conn = self._keep.popleft() + except IndexError: + break + + delta = conn.timeout - now + if delta > 0: + # add the connection back to the queue + with self._lock: + self._keep.appendleft(conn) + break + else: + self.nr_conns -= 1 + # remove the socket from the poller + with self._lock: + try: + self.poller.unregister(conn.sock) + except EnvironmentError as e: + if e.errno != 
errno.EBADF:
+                            raise
+                    except KeyError:
+                        # already removed by the system, continue
+                        pass
+
+                # close the socket
+                conn.close()
+
+    def is_parent_alive(self):
+        # If our parent changed then we shut down.
+        if self.ppid != os.getppid():
+            self.log.info("Parent changed, shutting down: %s", self)
+            return False
+        return True
+
+    def run(self):
+        # init listeners, add them to the event loop
+        for sock in self.sockets:
+            sock.setblocking(False)
+            # a race condition during graceful shutdown may make the listener
+            # name unavailable in the request handler so capture it once here
+            server = sock.getsockname()
+            acceptor = partial(self.accept, server)
+            self.poller.register(sock, selectors.EVENT_READ, acceptor)
+
+        while self.alive:
+            # notify the arbiter we are alive
+            self.notify()
+
+            # can we accept more connections?
+            if self.nr_conns < self.worker_connections:
+                # wait for an event
+                events = self.poller.select(1.0)
+                for key, _ in events:
+                    callback = key.data
+                    callback(key.fileobj)
+
+                # check (but do not wait) for finished requests
+                result = futures.wait(self.futures, timeout=0,
+                                      return_when=futures.FIRST_COMPLETED)
+            else:
+                # wait for a request to finish
+                result = futures.wait(self.futures, timeout=1.0,
+                                      return_when=futures.FIRST_COMPLETED)
+
+            # clean up finished requests
+            for fut in result.done:
+                self.futures.remove(fut)
+
+            if not self.is_parent_alive():
+                break
+
+            # handle keepalive timeouts
+            self.murder_keepalived()
+
+        self.tpool.shutdown(False)
+        self.poller.close()
+
+        for s in self.sockets:
+            s.close()
+
+        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
+
+    def finish_request(self, fs):
+        if fs.cancelled():
+            self.nr_conns -= 1
+            fs.conn.close()
+            return
+
+        try:
+            (keepalive, conn) = fs.result()
+            # if the connection should be kept alive, add it
+            # to the eventloop and record it
+            if keepalive:
+                # flag the socket as non-blocking
+                conn.sock.setblocking(False)
+
+                # register the connection
+                conn.set_timeout()
+                with self._lock:
+                    self._keep.append(conn)
+
+                    # add the socket to the event loop
+                    self.poller.register(conn.sock, selectors.EVENT_READ,
+                                         partial(self.reuse_connection, conn))
+            else:
+                self.nr_conns -= 1
+                conn.close()
+        except:
+            # an exception happened, make sure to close the
+            # socket.
+            self.nr_conns -= 1
+            fs.conn.close()
+
+    def handle(self, conn):
+        keepalive = False
+        req = None
+        try:
+            req = six.next(conn.parser)
+            if not req:
+                return (False, conn)
+
+            # handle the request
+            keepalive = self.handle_request(req, conn)
+            if keepalive:
+                return (keepalive, conn)
+        except http.errors.NoMoreData as e:
+            self.log.debug("Ignored premature client disconnection. %s", e)
+
+        except StopIteration as e:
+            self.log.debug("Closing connection.
%s", e) + except ssl.SSLError as e: + if e.args[0] == ssl.SSL_ERROR_EOF: + self.log.debug("ssl connection closed") + conn.sock.close() + else: + self.log.debug("Error processing SSL request.") + self.handle_error(req, conn.sock, conn.client, e) + + except EnvironmentError as e: + if e.errno not in (errno.EPIPE, errno.ECONNRESET): + self.log.exception("Socket error processing request.") + else: + if e.errno == errno.ECONNRESET: + self.log.debug("Ignoring connection reset") + else: + self.log.debug("Ignoring connection epipe") + except Exception as e: + self.handle_error(req, conn.sock, conn.client, e) + + return (False, conn) + + def handle_request(self, req, conn): + environ = {} + resp = None + try: + self.cfg.pre_request(self, req) + request_start = datetime.now() + resp, environ = wsgi.create(req, conn.sock, conn.client, + conn.server, self.cfg) + environ["wsgi.multithread"] = True + self.nr += 1 + if self.alive and self.nr >= self.max_requests: + self.log.info("Autorestarting worker after current request.") + resp.force_close() + self.alive = False + + if not self.cfg.keepalive: + resp.force_close() + elif len(self._keep) >= self.max_keepalived: + resp.force_close() + + respiter = self.wsgi(environ, resp.start_response) + try: + if isinstance(respiter, environ['wsgi.file_wrapper']): + resp.write_file(respiter) + else: + for item in respiter: + resp.write(item) + + resp.close() + request_time = datetime.now() - request_start + self.log.access(resp, req, environ, request_time) + finally: + if hasattr(respiter, "close"): + respiter.close() + + if resp.should_close(): + self.log.debug("Closing connection.") + return False + except EnvironmentError: + # pass to next try-except level + six.reraise(*sys.exc_info()) + except Exception: + if resp and resp.headers_sent: + # If the requests have already been sent, we should close the + # connection to indicate the error. + self.log.exception("Error handling request") + try: + conn.sock.shutdown(socket.SHUT_RDWR) + conn.sock.close() + except EnvironmentError: + pass + raise StopIteration() + raise + finally: + try: + self.cfg.post_request(self, req, environ, resp) + except Exception: + self.log.exception("Exception in post_request hook") + + return True diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/gtornado.py b/venv/lib/python3.6/site-packages/gunicorn/workers/gtornado.py new file mode 100644 index 0000000..7c1b118 --- /dev/null +++ b/venv/lib/python3.6/site-packages/gunicorn/workers/gtornado.py @@ -0,0 +1,146 @@ +# -*- coding: utf-8 - +# +# This file is part of gunicorn released under the MIT license. +# See the NOTICE for more information. 
+ +import copy +import os +import sys + +try: + import tornado +except ImportError: + raise RuntimeError("You need tornado installed to use this worker.") +import tornado.web +import tornado.httpserver +from tornado.ioloop import IOLoop, PeriodicCallback +from tornado.wsgi import WSGIContainer +from gunicorn.workers.base import Worker +from gunicorn import __version__ as gversion + + +# `io_loop` arguments to many Tornado functions have been removed in Tornado 5.0 +# +IOLOOP_PARAMETER_REMOVED = tornado.version_info >= (5, 0, 0) + + +class TornadoWorker(Worker): + + @classmethod + def setup(cls): + web = sys.modules.pop("tornado.web") + old_clear = web.RequestHandler.clear + + def clear(self): + old_clear(self) + if "Gunicorn" not in self._headers["Server"]: + self._headers["Server"] += " (Gunicorn/%s)" % gversion + web.RequestHandler.clear = clear + sys.modules["tornado.web"] = web + + def handle_exit(self, sig, frame): + if self.alive: + super(TornadoWorker, self).handle_exit(sig, frame) + + def handle_request(self): + self.nr += 1 + if self.alive and self.nr >= self.max_requests: + self.log.info("Autorestarting worker after current request.") + self.alive = False + + def watchdog(self): + if self.alive: + self.notify() + + if self.ppid != os.getppid(): + self.log.info("Parent changed, shutting down: %s", self) + self.alive = False + + def heartbeat(self): + if not self.alive: + if self.server_alive: + if hasattr(self, 'server'): + try: + self.server.stop() + except Exception: + pass + self.server_alive = False + else: + if not self.ioloop._callbacks: + self.ioloop.stop() + + def run(self): + self.ioloop = IOLoop.instance() + self.alive = True + self.server_alive = False + if IOLOOP_PARAMETER_REMOVED: + PeriodicCallback(self.watchdog, 1000).start() + PeriodicCallback(self.heartbeat, 1000).start() + else: + PeriodicCallback(self.watchdog, 1000, io_loop=self.ioloop).start() + PeriodicCallback(self.heartbeat, 1000, io_loop=self.ioloop).start() + + # Assume the app is a WSGI callable if its not an + # instance of tornado.web.Application or is an + # instance of tornado.wsgi.WSGIApplication + app = self.wsgi + if not isinstance(app, tornado.web.Application) or \ + isinstance(app, tornado.wsgi.WSGIApplication): + app = WSGIContainer(app) + + # Monkey-patching HTTPConnection.finish to count the + # number of requests being handled by Tornado. This + # will help gunicorn shutdown the worker if max_requests + # is exceeded. 
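+        # (Only older tornado releases expose HTTPConnection here; on
+        # newer ones the HTTPServer.on_close override in the else branch
+        # performs the same per-request accounting.)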
+        httpserver = sys.modules["tornado.httpserver"]
+        if hasattr(httpserver, 'HTTPConnection'):
+            old_connection_finish = httpserver.HTTPConnection.finish
+
+            def finish(other):
+                self.handle_request()
+                old_connection_finish(other)
+            httpserver.HTTPConnection.finish = finish
+            sys.modules["tornado.httpserver"] = httpserver
+
+            server_class = tornado.httpserver.HTTPServer
+        else:
+
+            class _HTTPServer(tornado.httpserver.HTTPServer):
+
+                def on_close(instance, server_conn):
+                    self.handle_request()
+                    super(_HTTPServer, instance).on_close(server_conn)
+
+            server_class = _HTTPServer
+
+        if self.cfg.is_ssl:
+            _ssl_opt = copy.deepcopy(self.cfg.ssl_options)
+            # tornado refuses initialization if ssl_options contains following
+            # options
+            del _ssl_opt["do_handshake_on_connect"]
+            del _ssl_opt["suppress_ragged_eofs"]
+            if IOLOOP_PARAMETER_REMOVED:
+                server = server_class(app, ssl_options=_ssl_opt)
+            else:
+                server = server_class(app, io_loop=self.ioloop,
+                                      ssl_options=_ssl_opt)
+        else:
+            if IOLOOP_PARAMETER_REMOVED:
+                server = server_class(app)
+            else:
+                server = server_class(app, io_loop=self.ioloop)
+
+        self.server = server
+        self.server_alive = True
+
+        for s in self.sockets:
+            s.setblocking(0)
+            if hasattr(server, "add_socket"):  # tornado > 2.0
+                server.add_socket(s)
+            elif hasattr(server, "_sockets"):  # tornado 2.0
+                server._sockets[s.fileno()] = s
+
+        server.no_keep_alive = self.cfg.keepalive <= 0
+        server.start(num_processes=1)
+
+        self.ioloop.start()
diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/sync.py b/venv/lib/python3.6/site-packages/gunicorn/workers/sync.py
new file mode 100644
index 0000000..1d2ce2f
--- /dev/null
+++ b/venv/lib/python3.6/site-packages/gunicorn/workers/sync.py
@@ -0,0 +1,208 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+#
+
+from datetime import datetime
+import errno
+import os
+import select
+import socket
+import ssl
+import sys
+
+import gunicorn.http as http
+import gunicorn.http.wsgi as wsgi
+import gunicorn.util as util
+import gunicorn.workers.base as base
+from gunicorn import six
+
+class StopWaiting(Exception):
+    """ exception raised to stop waiting for a connection """
+
+class SyncWorker(base.Worker):
+
+    def accept(self, listener):
+        client, addr = listener.accept()
+        client.setblocking(1)
+        util.close_on_exec(client)
+        self.handle(listener, client, addr)
+
+    def wait(self, timeout):
+        try:
+            self.notify()
+            ret = select.select(self.wait_fds, [], [], timeout)
+            if ret[0]:
+                if self.PIPE[0] in ret[0]:
+                    os.read(self.PIPE[0], 1)
+                return ret[0]
+
+        except select.error as e:
+            if e.args[0] == errno.EINTR:
+                return self.sockets
+            if e.args[0] == errno.EBADF:
+                if self.nr < 0:
+                    return self.sockets
+                else:
+                    raise StopWaiting
+            raise
+
+    def is_parent_alive(self):
+        # If our parent changed then we shut down.
+        if self.ppid != os.getppid():
+            self.log.info("Parent changed, shutting down: %s", self)
+            return False
+        return True
+
+    def run_for_one(self, timeout):
+        listener = self.sockets[0]
+        while self.alive:
+            self.notify()
+
+            # Accept a connection. If we get an error telling us
+            # that no connection is waiting we fall down to the
+            # select which is where we'll wait for a bit for new
+            # workers to come give us some love.
+            try:
+                self.accept(listener)
+                # Keep processing clients until no one is waiting. This
+                # prevents the need to select() for every client that we
+                # process.
+                continue
+
+            except EnvironmentError as e:
+                if e.errno not in (errno.EAGAIN, errno.ECONNABORTED,
+                                   errno.EWOULDBLOCK):
+                    raise
+
+            if not self.is_parent_alive():
+                return
+
+            try:
+                self.wait(timeout)
+            except StopWaiting:
+                return
+
+    def run_for_multiple(self, timeout):
+        while self.alive:
+            self.notify()
+
+            try:
+                ready = self.wait(timeout)
+            except StopWaiting:
+                return
+
+            if ready is not None:
+                for listener in ready:
+                    if listener == self.PIPE[0]:
+                        continue
+
+                    try:
+                        self.accept(listener)
+                    except EnvironmentError as e:
+                        if e.errno not in (errno.EAGAIN, errno.ECONNABORTED,
+                                           errno.EWOULDBLOCK):
+                            raise
+
+            if not self.is_parent_alive():
+                return
+
+    def run(self):
+        # if no timeout is given the worker will never wait and will
+        # use the CPU for nothing. This minimal timeout prevents it.
+        timeout = self.timeout or 0.5
+
+        # self.socket appears to lose its blocking status after
+        # we fork in the arbiter. Reset it here.
+        for s in self.sockets:
+            s.setblocking(0)
+
+        if len(self.sockets) > 1:
+            self.run_for_multiple(timeout)
+        else:
+            self.run_for_one(timeout)
+
+    def handle(self, listener, client, addr):
+        req = None
+        try:
+            if self.cfg.is_ssl:
+                client = ssl.wrap_socket(client, server_side=True,
+                                         **self.cfg.ssl_options)
+
+            parser = http.RequestParser(self.cfg, client)
+            req = six.next(parser)
+            self.handle_request(listener, req, client, addr)
+        except http.errors.NoMoreData as e:
+            self.log.debug("Ignored premature client disconnection. %s", e)
+        except StopIteration as e:
+            self.log.debug("Closing connection. %s", e)
+        except ssl.SSLError as e:
+            if e.args[0] == ssl.SSL_ERROR_EOF:
+                self.log.debug("ssl connection closed")
+                client.close()
+            else:
+                self.log.debug("Error processing SSL request.")
+                self.handle_error(req, client, addr, e)
+        except EnvironmentError as e:
+            if e.errno not in (errno.EPIPE, errno.ECONNRESET):
+                self.log.exception("Socket error processing request.")
+            else:
+                if e.errno == errno.ECONNRESET:
+                    self.log.debug("Ignoring connection reset")
+                else:
+                    self.log.debug("Ignoring EPIPE")
+        except Exception as e:
+            self.handle_error(req, client, addr, e)
+        finally:
+            util.close(client)
+
+    def handle_request(self, listener, req, client, addr):
+        environ = {}
+        resp = None
+        try:
+            self.cfg.pre_request(self, req)
+            request_start = datetime.now()
+            resp, environ = wsgi.create(req, client, addr,
+                                        listener.getsockname(), self.cfg)
+            # Force the connection closed until someone shows
+            # a buffering proxy that supports Keep-Alive to
+            # the backend.
+            resp.force_close()
+            self.nr += 1
+            if self.nr >= self.max_requests:
+                self.log.info("Autorestarting worker after current request.")
+                self.alive = False
+            respiter = self.wsgi(environ, resp.start_response)
+            try:
+                if isinstance(respiter, environ['wsgi.file_wrapper']):
+                    resp.write_file(respiter)
+                else:
+                    for item in respiter:
+                        resp.write(item)
+                resp.close()
+                request_time = datetime.now() - request_start
+                self.log.access(resp, req, environ, request_time)
+            finally:
+                if hasattr(respiter, "close"):
+                    respiter.close()
+        except EnvironmentError:
+            # pass to next try-except level
+            six.reraise(*sys.exc_info())
+        except Exception:
+            if resp and resp.headers_sent:
+                # If the requests have already been sent, we should close the
+                # connection to indicate the error.
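+                # (Once headers are on the wire a clean 500 response is
+                # no longer possible, so the socket is torn down and
+                # StopIteration unwinds back into handle().)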
+                self.log.exception("Error handling request")
+                try:
+                    client.shutdown(socket.SHUT_RDWR)
+                    client.close()
+                except EnvironmentError:
+                    pass
+                raise StopIteration()
+            raise
+        finally:
+            try:
+                self.cfg.post_request(self, req, environ, resp)
+            except Exception:
+                self.log.exception("Exception in post_request hook")
diff --git a/venv/lib/python3.6/site-packages/gunicorn/workers/workertmp.py b/venv/lib/python3.6/site-packages/gunicorn/workers/workertmp.py
new file mode 100644
index 0000000..36bc97a
--- /dev/null
+++ b/venv/lib/python3.6/site-packages/gunicorn/workers/workertmp.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import os
+import platform
+import tempfile
+
+from gunicorn import util
+
+PLATFORM = platform.system()
+IS_CYGWIN = PLATFORM.startswith('CYGWIN')
+
+
+class WorkerTmp(object):
+
+    def __init__(self, cfg):
+        old_umask = os.umask(cfg.umask)
+        fdir = cfg.worker_tmp_dir
+        if fdir and not os.path.isdir(fdir):
+            raise RuntimeError("%s doesn't exist. Can't create workertmp." % fdir)
+        fd, name = tempfile.mkstemp(prefix="wgunicorn-", dir=fdir)
+
+        # allows the process to write to the file
+        util.chown(name, cfg.uid, cfg.gid)
+        os.umask(old_umask)
+
+        # unlink the file so we don't leak temporary files
+        try:
+            if not IS_CYGWIN:
+                util.unlink(name)
+            self._tmp = os.fdopen(fd, 'w+b', 1)
+        except:
+            os.close(fd)
+            raise
+
+        self.spinner = 0
+
+    def notify(self):
+        try:
+            self.spinner = (self.spinner + 1) % 2
+            os.fchmod(self._tmp.fileno(), self.spinner)
+        except AttributeError:
+            # python < 2.6
+            self._tmp.truncate(0)
+            os.write(self._tmp.fileno(), b"X")
+
+    def last_update(self):
+        return os.fstat(self._tmp.fileno()).st_ctime
+
+    def fileno(self):
+        return self._tmp.fileno()
+
+    def close(self):
+        return self._tmp.close()
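The address-parsing rules implemented by ``util.parse_address`` above can be
summarised with a few illustrative calls -- a sketch with made-up inputs; the
expected values follow directly from the code::

    >>> from gunicorn.util import parse_address
    >>> parse_address("tcp://127.0.0.1:9000")
    ('127.0.0.1', 9000)
    >>> parse_address("[::1]:8080")
    ('::1', 8080)
    >>> parse_address("")    # empty netloc: 0.0.0.0 with the default port
    ('0.0.0.0', 8000)
    >>> parse_address("unix:///tmp/gunicorn.sock")
    '/tmp/gunicorn.sock'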